Index: stable/11/sys/dev/cxgbe/adapter.h =================================================================== --- stable/11/sys/dev/cxgbe/adapter.h (revision 346966) +++ stable/11/sys/dev/cxgbe/adapter.h (revision 346967) @@ -1,1291 +1,1293 @@ /*- * Copyright (c) 2011 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef __T4_ADAPTER_H__ #define __T4_ADAPTER_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "offload.h" #include "t4_ioctl.h" #include "common/t4_msg.h" #include "firmware/t4fw_interface.h" #define KTR_CXGBE KTR_SPARE3 MALLOC_DECLARE(M_CXGBE); #define CXGBE_UNIMPLEMENTED(s) \ panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__) #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define prefetch(x) __builtin_prefetch(x) #endif #ifndef SYSCTL_ADD_UQUAD #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD #define sysctl_handle_64 sysctl_handle_quad #define CTLTYPE_U64 CTLTYPE_QUAD #endif SYSCTL_DECL(_hw_cxgbe); struct adapter; typedef struct adapter adapter_t; enum { /* * All ingress queues use this entry size. Note that the firmware event * queue and any iq expecting CPL_RX_PKT in the descriptor needs this to * be at least 64. */ IQ_ESIZE = 64, /* Default queue sizes for all kinds of ingress queues */ FW_IQ_QSIZE = 256, RX_IQ_QSIZE = 1024, /* All egress queues use this entry size */ EQ_ESIZE = 64, /* Default queue sizes for all kinds of egress queues */ CTRL_EQ_QSIZE = 1024, TX_EQ_QSIZE = 1024, #if MJUMPAGESIZE != MCLBYTES SW_ZONE_SIZES = 4, /* cluster, jumbop, jumbo9k, jumbo16k */ #else SW_ZONE_SIZES = 3, /* cluster, jumbo9k, jumbo16k */ #endif CL_METADATA_SIZE = CACHE_LINE_SIZE, SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */ TX_SGL_SEGS = 39, TX_SGL_SEGS_TSO = 38, TX_SGL_SEGS_EO_TSO = 30, /* XXX: lower for IPv6. 
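An ethofld TSO WR carries the packet headers inline, and the larger v6 headers presumably leave room for fewer SGL entries.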
*/ TX_WR_FLITS = SGE_MAX_WR_LEN / 8 }; enum { /* adapter intr_type */ INTR_INTX = (1 << 0), INTR_MSI = (1 << 1), INTR_MSIX = (1 << 2) }; enum { XGMAC_MTU = (1 << 0), XGMAC_PROMISC = (1 << 1), XGMAC_ALLMULTI = (1 << 2), XGMAC_VLANEX = (1 << 3), XGMAC_UCADDR = (1 << 4), XGMAC_MCADDRS = (1 << 5), XGMAC_ALL = 0xffff }; enum { /* flags understood by begin_synchronized_op */ HOLD_LOCK = (1 << 0), SLEEP_OK = (1 << 1), INTR_OK = (1 << 2), /* flags understood by end_synchronized_op */ LOCK_HELD = HOLD_LOCK, }; enum { /* adapter flags */ FULL_INIT_DONE = (1 << 0), FW_OK = (1 << 1), CHK_MBOX_ACCESS = (1 << 2), MASTER_PF = (1 << 3), ADAP_SYSCTL_CTX = (1 << 4), ADAP_ERR = (1 << 5), BUF_PACKING_OK = (1 << 6), IS_VF = (1 << 7), CXGBE_BUSY = (1 << 9), /* port flags */ HAS_TRACEQ = (1 << 3), FIXED_IFMEDIA = (1 << 4), /* ifmedia list doesn't change. */ /* VI flags */ DOOMED = (1 << 0), VI_INIT_DONE = (1 << 1), VI_SYSCTL_CTX = (1 << 2), /* adapter debug_flags */ DF_DUMP_MBOX = (1 << 0), /* Log all mbox cmd/rpl. */ DF_LOAD_FW_ANYTIME = (1 << 1), /* Allow LOAD_FW after init */ DF_DISABLE_TCB_CACHE = (1 << 2), /* Disable TCB cache (T6+) */ DF_DISABLE_CFG_RETRY = (1 << 3), /* Disable fallback config */ DF_VERBOSE_SLOWINTR = (1 << 4), /* Chatty slow intr handler */ }; #define IS_DOOMED(vi) ((vi)->flags & DOOMED) #define SET_DOOMED(vi) do {(vi)->flags |= DOOMED;} while (0) #define IS_BUSY(sc) ((sc)->flags & CXGBE_BUSY) #define SET_BUSY(sc) do {(sc)->flags |= CXGBE_BUSY;} while (0) #define CLR_BUSY(sc) do {(sc)->flags &= ~CXGBE_BUSY;} while (0) struct vi_info { device_t dev; struct port_info *pi; struct ifnet *ifp; unsigned long flags; int if_flags; uint16_t *rss, *nm_rss; - int smt_idx; /* for convenience */ - uint16_t viid; + uint16_t viid; /* opaque VI identifier */ + uint16_t smt_idx; + uint16_t vin; + uint8_t vfvld; int16_t xact_addr_filt;/* index of exact MAC address filter */ uint16_t rss_size; /* size of VI's RSS table slice */ uint16_t rss_base; /* start of VI's RSS table slice */ int nintr; int first_intr; /* These need to be int as they are used in sysctl */ int ntxq; /* # of tx queues */ int first_txq; /* index of first tx queue */ int rsrv_noflowq; /* Reserve queue 0 for non-flowid packets */ int nrxq; /* # of rx queues */ int first_rxq; /* index of first rx queue */ int nofldtxq; /* # of offload tx queues */ int first_ofld_txq; /* index of first offload tx queue */ int nofldrxq; /* # of offload rx queues */ int first_ofld_rxq; /* index of first offload rx queue */ int nnmtxq; int first_nm_txq; int nnmrxq; int first_nm_rxq; int tmr_idx; int ofld_tmr_idx; int pktc_idx; int ofld_pktc_idx; int qsize_rxq; int qsize_txq; struct timeval last_refreshed; struct fw_vi_stats_vf stats; struct callout tick; struct sysctl_ctx_list ctx; /* from ifconfig up to driver detach */ uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */ }; struct tx_ch_rl_params { enum fw_sched_params_rate ratemode; /* %port (REL) or kbps (ABS) */ uint32_t maxrate; }; enum { CLRL_USER = (1 << 0), /* allocated manually. */ CLRL_SYNC = (1 << 1), /* sync hw update in progress. */ CLRL_ASYNC = (1 << 2), /* async hw update requested. */ CLRL_ERR = (1 << 3), /* last hw setup ended in error. 
*/ }; struct tx_cl_rl_params { int refcount; uint8_t flags; enum fw_sched_params_rate ratemode; /* %port REL or ABS value */ enum fw_sched_params_unit rateunit; /* kbps or pps (when ABS) */ enum fw_sched_params_mode mode; /* aggr or per-flow */ uint32_t maxrate; uint16_t pktsize; uint16_t burstsize; }; /* Tx scheduler parameters for a channel/port */ struct tx_sched_params { /* Channel Rate Limiter */ struct tx_ch_rl_params ch_rl; /* Class WRR */ /* XXX */ /* Class Rate Limiter (including the default pktsize and burstsize). */ int pktsize; int burstsize; struct tx_cl_rl_params cl_rl[]; }; struct port_info { device_t dev; struct adapter *adapter; struct vi_info *vi; int nvi; int up_vis; int uld_vis; struct tx_sched_params *sched_params; struct mtx pi_lock; char lockname[16]; unsigned long flags; uint8_t lport; /* associated offload logical port */ int8_t mdio_addr; uint8_t port_type; uint8_t mod_type; uint8_t port_id; uint8_t tx_chan; uint8_t mps_bg_map; /* rx MPS buffer group bitmap */ uint8_t rx_e_chan_map; /* rx TP e-channel bitmap */ struct link_config link_cfg; struct ifmedia media; struct timeval last_refreshed; struct port_stats stats; u_int tnl_cong_drops; u_int tx_parse_error; u_long tx_tls_records; u_long tx_tls_octets; u_long rx_tls_records; u_long rx_tls_octets; struct callout tick; }; #define IS_MAIN_VI(vi) ((vi) == &((vi)->pi->vi[0])) /* Where the cluster came from, how it has been carved up. */ struct cluster_layout { int8_t zidx; int8_t hwidx; uint16_t region1; /* mbufs laid out within this region */ /* region2 is the DMA region */ uint16_t region3; /* cluster_metadata within this region */ }; struct cluster_metadata { u_int refcount; struct fl_sdesc *sd; /* For debug only. Could easily be stale */ }; struct fl_sdesc { caddr_t cl; uint16_t nmbuf; /* # of driver originated mbufs with ref on cluster */ struct cluster_layout cll; }; struct tx_desc { __be64 flit[8]; }; struct tx_sdesc { struct mbuf *m; /* m_nextpkt linked chain of frames */ uint8_t desc_used; /* # of hardware descriptors used by the WR */ }; #define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header)) struct iq_desc { struct rss_header rss; uint8_t cpl[IQ_PAD]; struct rsp_ctrl rsp; }; #undef IQ_PAD CTASSERT(sizeof(struct iq_desc) == IQ_ESIZE); enum { /* iq flags */ IQ_ALLOCATED = (1 << 0), /* firmware resources allocated */ IQ_HAS_FL = (1 << 1), /* iq associated with a freelist */ /* 1 << 2 Used to be IQ_INTR */ IQ_LRO_ENABLED = (1 << 3), /* iq is an eth rxq with LRO enabled */ IQ_ADJ_CREDIT = (1 << 4), /* hw is off by 1 credit for this iq */ /* iq state */ IQS_DISABLED = 0, IQS_BUSY = 1, IQS_IDLE = 2, /* netmap related flags */ NM_OFF = 0, NM_ON = 1, NM_BUSY = 2, }; enum { CPL_COOKIE_RESERVED = 0, CPL_COOKIE_FILTER, CPL_COOKIE_DDP0, CPL_COOKIE_DDP1, CPL_COOKIE_TOM, CPL_COOKIE_HASHFILTER, CPL_COOKIE_ETHOFLD, CPL_COOKIE_AVAILABLE3, NUM_CPL_COOKIES = 8 /* Limited by M_COOKIE. Do not increase. */ }; struct sge_iq; struct rss_header; typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *, struct mbuf *); typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *); typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *); /* * Ingress Queue: T4 is producer, driver is consumer. 
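* The driver walks the descriptor ring with cidx, using the gen(eration)
* bit to recognize freshly written entries after each wrap; the entry at
* sidx holds the status page.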
*/ struct sge_iq { uint32_t flags; volatile int state; struct adapter *adapter; struct iq_desc *desc; /* KVA of descriptor ring */ int8_t intr_pktc_idx; /* packet count threshold index */ uint8_t gen; /* generation bit */ uint8_t intr_params; /* interrupt holdoff parameters */ uint8_t intr_next; /* XXX: holdoff for next interrupt */ uint16_t qsize; /* size (# of entries) of the queue */ uint16_t sidx; /* index of the entry with the status page */ uint16_t cidx; /* consumer index */ uint16_t cntxt_id; /* SGE context id for the iq */ uint16_t abs_id; /* absolute SGE id for the iq */ STAILQ_ENTRY(sge_iq) link; bus_dma_tag_t desc_tag; bus_dmamap_t desc_map; bus_addr_t ba; /* bus address of descriptor ring */ }; enum { EQ_CTRL = 1, EQ_ETH = 2, EQ_OFLD = 3, /* eq flags */ EQ_TYPEMASK = 0x3, /* 2 lsbits hold the type (see above) */ EQ_ALLOCATED = (1 << 2), /* firmware resources allocated */ EQ_ENABLED = (1 << 3), /* open for business */ EQ_QFLUSH = (1 << 4), /* if_qflush in progress */ }; /* Listed in order of preference. Update t4_sysctls too if you change these */ enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB}; /* * Egress Queue: driver is producer, T4 is consumer. * * Note: A free list is an egress queue (driver produces the buffers and T4 * consumes them) but it's special enough to have its own struct (see sge_fl). */ struct sge_eq { unsigned int flags; /* MUST be first */ unsigned int cntxt_id; /* SGE context id for the eq */ unsigned int abs_id; /* absolute SGE id for the eq */ struct mtx eq_lock; struct tx_desc *desc; /* KVA of descriptor ring */ uint8_t doorbells; volatile uint32_t *udb; /* KVA of doorbell (lies within BAR2) */ u_int udb_qid; /* relative qid within the doorbell page */ uint16_t sidx; /* index of the entry with the status page */ uint16_t cidx; /* consumer idx (desc idx) */ uint16_t pidx; /* producer idx (desc idx) */ uint16_t equeqidx; /* EQUEQ last requested at this pidx */ uint16_t dbidx; /* pidx of the most recent doorbell */ uint16_t iqid; /* iq that gets egr_update for the eq */ uint8_t tx_chan; /* tx channel used by the eq */ volatile u_int equiq; /* EQUIQ outstanding */ bus_dma_tag_t desc_tag; bus_dmamap_t desc_map; bus_addr_t ba; /* bus address of descriptor ring */ char lockname[16]; }; struct sw_zone_info { uma_zone_t zone; /* zone that this cluster comes from */ int size; /* size of cluster: 2K, 4K, 9K, 16K, etc. 
*/ int type; /* EXT_xxx type of the cluster */ int8_t head_hwidx; int8_t tail_hwidx; }; struct hw_buf_info { int8_t zidx; /* backpointer to zone; -ve means unused */ int8_t next; /* next hwidx for this zone; -1 means no more */ int size; }; enum { NUM_MEMWIN = 3, MEMWIN0_APERTURE = 2048, MEMWIN0_BASE = 0x1b800, MEMWIN1_APERTURE = 32768, MEMWIN1_BASE = 0x28000, MEMWIN2_APERTURE_T4 = 65536, MEMWIN2_BASE_T4 = 0x30000, MEMWIN2_APERTURE_T5 = 128 * 1024, MEMWIN2_BASE_T5 = 0x60000, }; struct memwin { struct rwlock mw_lock __aligned(CACHE_LINE_SIZE); uint32_t mw_base; /* constant after setup_memwin */ uint32_t mw_aperture; /* ditto */ uint32_t mw_curpos; /* protected by mw_lock */ }; enum { FL_STARVING = (1 << 0), /* on the adapter's list of starving fl's */ FL_DOOMED = (1 << 1), /* about to be destroyed */ FL_BUF_PACKING = (1 << 2), /* buffer packing enabled */ FL_BUF_RESUME = (1 << 3), /* resume from the middle of the frame */ }; #define FL_RUNNING_LOW(fl) \ (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) <= fl->lowat) #define FL_NOT_RUNNING_LOW(fl) \ (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) >= 2 * fl->lowat) struct sge_fl { struct mtx fl_lock; __be64 *desc; /* KVA of descriptor ring, ptr to addresses */ struct fl_sdesc *sdesc; /* KVA of software descriptor ring */ struct cluster_layout cll_def; /* default refill zone, layout */ uint16_t lowat; /* # of buffers <= this means fl needs help */ int flags; uint16_t buf_boundary; /* The 16b idx all deal with hw descriptors */ uint16_t dbidx; /* hw pidx after last doorbell */ uint16_t sidx; /* index of status page */ volatile uint16_t hw_cidx; /* The 32b idx are all buffer idx, not hardware descriptor idx */ uint32_t cidx; /* consumer index */ uint32_t pidx; /* producer index */ uint32_t dbval; u_int rx_offset; /* offset in fl buf (when buffer packing) */ volatile uint32_t *udb; uint64_t mbuf_allocated;/* # of mbuf allocated from zone_mbuf */ uint64_t mbuf_inlined; /* # of mbuf created within clusters */ uint64_t cl_allocated; /* # of clusters allocated */ uint64_t cl_recycled; /* # of clusters recycled */ uint64_t cl_fast_recycled; /* # of clusters recycled (fast) */ /* These 3 are valid when FL_BUF_RESUME is set, stale otherwise. 
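m0 is the frame assembled so far, pnext is where the next mbuf will be linked, and remaining is the payload still to be taken from the freelist.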
*/ struct mbuf *m0; struct mbuf **pnext; u_int remaining; uint16_t qsize; /* # of hw descriptors (status page included) */ uint16_t cntxt_id; /* SGE context id for the freelist */ TAILQ_ENTRY(sge_fl) link; /* All starving freelists */ bus_dma_tag_t desc_tag; bus_dmamap_t desc_map; char lockname[16]; bus_addr_t ba; /* bus address of descriptor ring */ struct cluster_layout cll_alt; /* alternate refill zone, layout */ }; struct mp_ring; /* txq: SGE egress queue + what's needed for Ethernet NIC */ struct sge_txq { struct sge_eq eq; /* MUST be first */ struct ifnet *ifp; /* the interface this txq belongs to */ struct mp_ring *r; /* tx software ring */ struct tx_sdesc *sdesc; /* KVA of software descriptor ring */ struct sglist *gl; __be32 cpl_ctrl0; /* for convenience */ int tc_idx; /* traffic class */ struct task tx_reclaim_task; /* stats for common events first */ uint64_t txcsum; /* # of times hardware assisted with checksum */ uint64_t tso_wrs; /* # of TSO work requests */ uint64_t vlan_insertion;/* # of times VLAN tag was inserted */ uint64_t imm_wrs; /* # of work requests with immediate data */ uint64_t sgl_wrs; /* # of work requests with direct SGL */ uint64_t txpkt_wrs; /* # of txpkt work requests (not coalesced) */ uint64_t txpkts0_wrs; /* # of type0 coalesced tx work requests */ uint64_t txpkts1_wrs; /* # of type1 coalesced tx work requests */ uint64_t txpkts0_pkts; /* # of frames in type0 coalesced tx WRs */ uint64_t txpkts1_pkts; /* # of frames in type1 coalesced tx WRs */ /* stats for not-that-common events */ } __aligned(CACHE_LINE_SIZE); /* rxq: SGE ingress queue + SGE free list + miscellaneous items */ struct sge_rxq { struct sge_iq iq; /* MUST be first */ struct sge_fl fl; /* MUST follow iq */ struct ifnet *ifp; /* the interface this rxq belongs to */ #if defined(INET) || defined(INET6) struct lro_ctrl lro; /* LRO state */ #endif /* stats for common events first */ uint64_t rxcsum; /* # of times hardware assisted with checksum */ uint64_t vlan_extraction;/* # of times VLAN tag was extracted */ /* stats for not-that-common events */ } __aligned(CACHE_LINE_SIZE); static inline struct sge_rxq * iq_to_rxq(struct sge_iq *iq) { return (__containerof(iq, struct sge_rxq, iq)); } /* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */ struct sge_ofld_rxq { struct sge_iq iq; /* MUST be first */ struct sge_fl fl; /* MUST follow iq */ } __aligned(CACHE_LINE_SIZE); static inline struct sge_ofld_rxq * iq_to_ofld_rxq(struct sge_iq *iq) { return (__containerof(iq, struct sge_ofld_rxq, iq)); } struct wrqe { STAILQ_ENTRY(wrqe) link; struct sge_wrq *wrq; int wr_len; char wr[] __aligned(16); }; struct wrq_cookie { TAILQ_ENTRY(wrq_cookie) link; int ndesc; int pidx; }; /* * wrq: SGE egress queue that is given prebuilt work requests. Both the control * and offload tx queues are of this type. */ struct sge_wrq { struct sge_eq eq; /* MUST be first */ struct adapter *adapter; struct task wrq_tx_task; /* Tx desc reserved but WR not "committed" yet. */ TAILQ_HEAD(wrq_incomplete_wrs , wrq_cookie) incomplete_wrs; /* List of WRs ready to go out as soon as descriptors are available. */ STAILQ_HEAD(, wrqe) wr_list; u_int nwr_pending; u_int ndesc_needed; /* stats for common events first */ uint64_t tx_wrs_direct; /* # of WRs written directly to desc ring. */ uint64_t tx_wrs_ss; /* # of WRs copied from scratch space. */ uint64_t tx_wrs_copied; /* # of WRs queued and copied to desc ring. 
*/ /* stats for not-that-common events */ /* * Scratch space for work requests that wrap around after reaching the * status page, and some information about the last WR that used it. */ uint16_t ss_pidx; uint16_t ss_len; uint8_t ss[SGE_MAX_WR_LEN]; } __aligned(CACHE_LINE_SIZE); struct sge_nm_rxq { volatile int nm_state; /* NM_OFF, NM_ON, or NM_BUSY */ struct vi_info *vi; struct iq_desc *iq_desc; uint16_t iq_abs_id; uint16_t iq_cntxt_id; uint16_t iq_cidx; uint16_t iq_sidx; uint8_t iq_gen; __be64 *fl_desc; uint16_t fl_cntxt_id; uint32_t fl_cidx; uint32_t fl_pidx; uint32_t fl_sidx; uint32_t fl_db_val; u_int fl_hwidx:4; u_int nid; /* netmap ring # for this queue */ /* infrequently used items after this */ bus_dma_tag_t iq_desc_tag; bus_dmamap_t iq_desc_map; bus_addr_t iq_ba; int intr_idx; bus_dma_tag_t fl_desc_tag; bus_dmamap_t fl_desc_map; bus_addr_t fl_ba; } __aligned(CACHE_LINE_SIZE); struct sge_nm_txq { struct tx_desc *desc; uint16_t cidx; uint16_t pidx; uint16_t sidx; uint16_t equiqidx; /* EQUIQ last requested at this pidx */ uint16_t equeqidx; /* EQUEQ last requested at this pidx */ uint16_t dbidx; /* pidx of the most recent doorbell */ uint8_t doorbells; volatile uint32_t *udb; u_int udb_qid; u_int cntxt_id; __be32 cpl_ctrl0; /* for convenience */ u_int nid; /* netmap ring # for this queue */ /* infrequently used items after this */ bus_dma_tag_t desc_tag; bus_dmamap_t desc_map; bus_addr_t ba; int iqidx; } __aligned(CACHE_LINE_SIZE); struct sge { int nrxq; /* total # of Ethernet rx queues */ int ntxq; /* total # of Ethernet tx queues */ int nofldrxq; /* total # of TOE rx queues */ int nofldtxq; /* total # of TOE tx queues */ int nnmrxq; /* total # of netmap rx queues */ int nnmtxq; /* total # of netmap tx queues */ int niq; /* total # of ingress queues */ int neq; /* total # of egress queues */ struct sge_iq fwq; /* Firmware event queue */ struct sge_wrq *ctrlq; /* Control queues */ struct sge_txq *txq; /* NIC tx queues */ struct sge_rxq *rxq; /* NIC rx queues */ struct sge_wrq *ofld_txq; /* TOE tx queues */ struct sge_ofld_rxq *ofld_rxq; /* TOE rx queues */ struct sge_nm_txq *nm_txq; /* netmap tx queues */ struct sge_nm_rxq *nm_rxq; /* netmap rx queues */ uint16_t iq_start; /* first cntxt_id */ uint16_t iq_base; /* first abs_id */ int eq_start; /* first cntxt_id */ int eq_base; /* first abs_id */ struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */ struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */ int8_t safe_hwidx1; /* may not have room for metadata */ int8_t safe_hwidx2; /* with room for metadata and maybe more */ struct sw_zone_info sw_zone_info[SW_ZONE_SIZES]; struct hw_buf_info hw_buf_info[SGE_FLBUF_SIZES]; }; struct devnames { const char *nexus_name; const char *ifnet_name; const char *vi_ifnet_name; const char *pf03_drv_name; const char *vf_nexus_name; const char *vf_ifnet_name; }; struct clip_entry; struct adapter { SLIST_ENTRY(adapter) link; device_t dev; struct cdev *cdev; const struct devnames *names; /* PCIe register resources */ int regs_rid; struct resource *regs_res; int msix_rid; struct resource *msix_res; bus_space_handle_t bh; bus_space_tag_t bt; bus_size_t mmio_len; int udbs_rid; struct resource *udbs_res; volatile uint8_t *udbs_base; unsigned int pf; unsigned int mbox; unsigned int vpd_busy; unsigned int vpd_flag; /* Interrupt information */ int intr_type; int intr_count; struct irq { struct resource *res; int rid; void *tag; struct sge_rxq *rxq; struct sge_nm_rxq *nm_rxq; } __aligned(CACHE_LINE_SIZE) *irq; int sge_gts_reg; int sge_kdoorbell_reg; 
bus_dma_tag_t dmat; /* Parent DMA tag */ struct sge sge; int lro_timeout; int sc_do_rxcopy; struct taskqueue *tq[MAX_NCHAN]; /* General purpose taskqueues */ struct port_info *port[MAX_NPORTS]; uint8_t chan_map[MAX_NCHAN]; /* channel -> port */ struct mtx clip_table_lock; TAILQ_HEAD(, clip_entry) clip_table; int clip_gen; void *tom_softc; /* (struct tom_data *) */ struct tom_tunables tt; struct t4_offload_policy *policy; struct rwlock policy_lock; void *iwarp_softc; /* (struct c4iw_dev *) */ struct iw_tunables iwt; void *iscsi_ulp_softc; /* (struct cxgbei_data *) */ void *ccr_softc; /* (struct ccr_softc *) */ struct l2t_data *l2t; /* L2 table */ struct smt_data *smt; /* Source MAC Table */ struct tid_info tids; vmem_t *key_map; uint8_t doorbells; int offload_map; /* ports with IFCAP_TOE enabled */ int active_ulds; /* ULDs activated on this adapter */ int flags; int debug_flags; char ifp_lockname[16]; struct mtx ifp_lock; struct ifnet *ifp; /* tracer ifp */ struct ifmedia media; int traceq; /* iq used by all tracers, -1 if none */ int tracer_valid; /* bitmap of valid tracers */ int tracer_enabled; /* bitmap of enabled tracers */ char fw_version[16]; char tp_version[16]; char er_version[16]; char bs_version[16]; char cfg_file[32]; u_int cfcsum; struct adapter_params params; const struct chip_params *chip_params; struct t4_virt_res vres; uint16_t nbmcaps; uint16_t linkcaps; uint16_t switchcaps; uint16_t niccaps; uint16_t toecaps; uint16_t rdmacaps; uint16_t cryptocaps; uint16_t iscsicaps; uint16_t fcoecaps; struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */ struct mtx sc_lock; char lockname[16]; /* Starving free lists */ struct mtx sfl_lock; /* same cache-line as sc_lock? but that's ok */ TAILQ_HEAD(, sge_fl) sfl; struct callout sfl_callout; struct mtx reg_lock; /* for indirect register access */ struct memwin memwin[NUM_MEMWIN]; /* memory windows */ struct mtx tc_lock; struct task tc_task; const char *last_op; const void *last_op_thr; int last_op_flags; int swintr; }; #define ADAPTER_LOCK(sc) mtx_lock(&(sc)->sc_lock) #define ADAPTER_UNLOCK(sc) mtx_unlock(&(sc)->sc_lock) #define ADAPTER_LOCK_ASSERT_OWNED(sc) mtx_assert(&(sc)->sc_lock, MA_OWNED) #define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED) #define ASSERT_SYNCHRONIZED_OP(sc) \ KASSERT(IS_BUSY(sc) && \ (mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \ ("%s: operation not synchronized.", __func__)) #define PORT_LOCK(pi) mtx_lock(&(pi)->pi_lock) #define PORT_UNLOCK(pi) mtx_unlock(&(pi)->pi_lock) #define PORT_LOCK_ASSERT_OWNED(pi) mtx_assert(&(pi)->pi_lock, MA_OWNED) #define PORT_LOCK_ASSERT_NOTOWNED(pi) mtx_assert(&(pi)->pi_lock, MA_NOTOWNED) #define FL_LOCK(fl) mtx_lock(&(fl)->fl_lock) #define FL_TRYLOCK(fl) mtx_trylock(&(fl)->fl_lock) #define FL_UNLOCK(fl) mtx_unlock(&(fl)->fl_lock) #define FL_LOCK_ASSERT_OWNED(fl) mtx_assert(&(fl)->fl_lock, MA_OWNED) #define FL_LOCK_ASSERT_NOTOWNED(fl) mtx_assert(&(fl)->fl_lock, MA_NOTOWNED) #define RXQ_FL_LOCK(rxq) FL_LOCK(&(rxq)->fl) #define RXQ_FL_UNLOCK(rxq) FL_UNLOCK(&(rxq)->fl) #define RXQ_FL_LOCK_ASSERT_OWNED(rxq) FL_LOCK_ASSERT_OWNED(&(rxq)->fl) #define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl) #define EQ_LOCK(eq) mtx_lock(&(eq)->eq_lock) #define EQ_TRYLOCK(eq) mtx_trylock(&(eq)->eq_lock) #define EQ_UNLOCK(eq) mtx_unlock(&(eq)->eq_lock) #define EQ_LOCK_ASSERT_OWNED(eq) mtx_assert(&(eq)->eq_lock, MA_OWNED) #define EQ_LOCK_ASSERT_NOTOWNED(eq) mtx_assert(&(eq)->eq_lock, MA_NOTOWNED) #define TXQ_LOCK(txq) 
EQ_LOCK(&(txq)->eq) #define TXQ_TRYLOCK(txq) EQ_TRYLOCK(&(txq)->eq) #define TXQ_UNLOCK(txq) EQ_UNLOCK(&(txq)->eq) #define TXQ_LOCK_ASSERT_OWNED(txq) EQ_LOCK_ASSERT_OWNED(&(txq)->eq) #define TXQ_LOCK_ASSERT_NOTOWNED(txq) EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq) #define for_each_txq(vi, iter, q) \ for (q = &vi->pi->adapter->sge.txq[vi->first_txq], iter = 0; \ iter < vi->ntxq; ++iter, ++q) #define for_each_rxq(vi, iter, q) \ for (q = &vi->pi->adapter->sge.rxq[vi->first_rxq], iter = 0; \ iter < vi->nrxq; ++iter, ++q) #define for_each_ofld_txq(vi, iter, q) \ for (q = &vi->pi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \ iter < vi->nofldtxq; ++iter, ++q) #define for_each_ofld_rxq(vi, iter, q) \ for (q = &vi->pi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \ iter < vi->nofldrxq; ++iter, ++q) #define for_each_nm_txq(vi, iter, q) \ for (q = &vi->pi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \ iter < vi->nnmtxq; ++iter, ++q) #define for_each_nm_rxq(vi, iter, q) \ for (q = &vi->pi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \ iter < vi->nnmrxq; ++iter, ++q) #define for_each_vi(_pi, _iter, _vi) \ for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \ ++(_iter), ++(_vi)) #define IDXINCR(idx, incr, wrap) do { \ idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \ } while (0) #define IDXDIFF(head, tail, wrap) \ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) /* One for errors, one for firmware events */ #define T4_EXTRA_INTR 2 /* One for firmware events */ #define T4VF_EXTRA_INTR 1 static inline int forwarding_intr_to_fwq(struct adapter *sc) { return (sc->intr_count == 1); } static inline uint32_t t4_read_reg(struct adapter *sc, uint32_t reg) { return bus_space_read_4(sc->bt, sc->bh, reg); } static inline void t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val) { bus_space_write_4(sc->bt, sc->bh, reg, val); } static inline uint64_t t4_read_reg64(struct adapter *sc, uint32_t reg) { #ifdef __LP64__ return bus_space_read_8(sc->bt, sc->bh, reg); #else return (uint64_t)bus_space_read_4(sc->bt, sc->bh, reg) + ((uint64_t)bus_space_read_4(sc->bt, sc->bh, reg + 4) << 32); #endif } static inline void t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val) { #ifdef __LP64__ bus_space_write_8(sc->bt, sc->bh, reg, val); #else bus_space_write_4(sc->bt, sc->bh, reg, val); bus_space_write_4(sc->bt, sc->bh, reg + 4, val>> 32); #endif } static inline void t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val) { *val = pci_read_config(sc->dev, reg, 1); } static inline void t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val) { pci_write_config(sc->dev, reg, val, 1); } static inline void t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val) { *val = pci_read_config(sc->dev, reg, 2); } static inline void t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val) { pci_write_config(sc->dev, reg, val, 2); } static inline void t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val) { *val = pci_read_config(sc->dev, reg, 4); } static inline void t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val) { pci_write_config(sc->dev, reg, val, 4); } static inline struct port_info * adap2pinfo(struct adapter *sc, int idx) { return (sc->port[idx]); } static inline void t4_os_set_hw_addr(struct port_info *pi, uint8_t hw_addr[]) { bcopy(hw_addr, pi->vi[0].hw_addr, ETHER_ADDR_LEN); } static inline int tx_resume_threshold(struct sge_eq *eq) { /* not quite the same as qsize / 4, but this will do. 
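E.g., with the default TX_EQ_QSIZE of 1024 and a one-descriptor status page, sidx is 1023 and the threshold comes to roughly 255 descriptors.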
*/ return (eq->sidx / 4); } static inline int t4_use_ldst(struct adapter *sc) { #ifdef notyet return (sc->flags & FW_OK || !sc->use_bd); #else return (0); #endif } static inline void CH_DUMP_MBOX(struct adapter *sc, int mbox, const int reg, const char *msg, const __be64 *const p, const bool err) { if (!(sc->debug_flags & DF_DUMP_MBOX) && !err) return; if (p != NULL) { log(err ? LOG_ERR : LOG_DEBUG, "%s: mbox %u %s %016llx %016llx %016llx %016llx " "%016llx %016llx %016llx %016llx\n", device_get_nameunit(sc->dev), mbox, msg, (long long)be64_to_cpu(p[0]), (long long)be64_to_cpu(p[1]), (long long)be64_to_cpu(p[2]), (long long)be64_to_cpu(p[3]), (long long)be64_to_cpu(p[4]), (long long)be64_to_cpu(p[5]), (long long)be64_to_cpu(p[6]), (long long)be64_to_cpu(p[7])); } else { log(err ? LOG_ERR : LOG_DEBUG, "%s: mbox %u %s %016llx %016llx %016llx %016llx " "%016llx %016llx %016llx %016llx\n", device_get_nameunit(sc->dev), mbox, msg, (long long)t4_read_reg64(sc, reg), (long long)t4_read_reg64(sc, reg + 8), (long long)t4_read_reg64(sc, reg + 16), (long long)t4_read_reg64(sc, reg + 24), (long long)t4_read_reg64(sc, reg + 32), (long long)t4_read_reg64(sc, reg + 40), (long long)t4_read_reg64(sc, reg + 48), (long long)t4_read_reg64(sc, reg + 56)); } } /* t4_main.c */ extern int t4_ntxq; extern int t4_nrxq; extern int t4_intr_types; extern int t4_tmr_idx; extern int t4_pktc_idx; extern unsigned int t4_qsize_rxq; extern unsigned int t4_qsize_txq; extern device_method_t cxgbe_methods[]; int t4_os_find_pci_capability(struct adapter *, int); int t4_os_pci_save_state(struct adapter *); int t4_os_pci_restore_state(struct adapter *); void t4_os_portmod_changed(struct port_info *); void t4_os_link_changed(struct port_info *); void t4_iterate(void (*)(struct adapter *, void *), void *); void t4_init_devnames(struct adapter *); void t4_add_adapter(struct adapter *); void t4_aes_getdeckey(void *, const void *, unsigned int); int t4_detach_common(device_t); int t4_map_bars_0_and_4(struct adapter *); int t4_map_bar_2(struct adapter *); int t4_setup_intr_handlers(struct adapter *); void t4_sysctls(struct adapter *); int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *); void doom_vi(struct adapter *, struct vi_info *); void end_synchronized_op(struct adapter *, int); int update_mac_settings(struct ifnet *, int); int adapter_full_init(struct adapter *); int adapter_full_uninit(struct adapter *); uint64_t cxgbe_get_counter(struct ifnet *, ift_counter); int vi_full_init(struct vi_info *); int vi_full_uninit(struct vi_info *); void vi_sysctls(struct vi_info *); void vi_tick(void *); int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int); int alloc_atid_tab(struct tid_info *, int); void free_atid_tab(struct tid_info *); int alloc_atid(struct adapter *, void *); void *lookup_atid(struct adapter *, int); void free_atid(struct adapter *, int); void release_tid(struct adapter *, int, struct sge_wrq *); int cxgbe_media_change(struct ifnet *); void cxgbe_media_status(struct ifnet *, struct ifmediareq *); bool t4_os_dump_cimla(struct adapter *, int, bool); void t4_os_dump_devlog(struct adapter *); #ifdef DEV_NETMAP /* t4_netmap.c */ struct sge_nm_rxq; void cxgbe_nm_attach(struct vi_info *); void cxgbe_nm_detach(struct vi_info *); void service_nm_rxq(struct sge_nm_rxq *); #endif /* t4_sge.c */ void t4_sge_modload(void); void t4_sge_modunload(void); uint64_t t4_sge_extfree_refs(void); void t4_tweak_chip_settings(struct adapter *); int t4_read_chip_settings(struct adapter *); int 
t4_create_dma_tag(struct adapter *); void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *, struct sysctl_oid_list *); int t4_destroy_dma_tag(struct adapter *); int t4_setup_adapter_queues(struct adapter *); int t4_teardown_adapter_queues(struct adapter *); int t4_setup_vi_queues(struct vi_info *); int t4_teardown_vi_queues(struct vi_info *); void t4_intr_all(void *); void t4_intr(void *); #ifdef DEV_NETMAP void t4_nm_intr(void *); void t4_vi_intr(void *); #endif void t4_intr_err(void *); void t4_intr_evt(void *); void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *); void t4_update_fl_bufsize(struct ifnet *); int parse_pkt(struct adapter *, struct mbuf **); void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *); void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *); int tnl_cong(struct port_info *, int); void t4_register_an_handler(an_handler_t); void t4_register_fw_msg_handler(int, fw_msg_handler_t); void t4_register_cpl_handler(int, cpl_handler_t); void t4_register_shared_cpl_handler(int, cpl_handler_t, int); /* t4_tracer.c */ struct t4_tracer; void t4_tracer_modload(void); void t4_tracer_modunload(void); void t4_tracer_port_detach(struct adapter *); int t4_get_tracer(struct adapter *, struct t4_tracer *); int t4_set_tracer(struct adapter *, struct t4_tracer *); int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *); int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *); /* t4_sched.c */ int t4_set_sched_class(struct adapter *, struct t4_sched_params *); int t4_set_sched_queue(struct adapter *, struct t4_sched_queue *); int t4_init_tx_sched(struct adapter *); int t4_free_tx_sched(struct adapter *); void t4_update_tx_sched(struct adapter *); int t4_reserve_cl_rl_kbps(struct adapter *, int, u_int, int *); void t4_release_cl_rl(struct adapter *, int, int); int sysctl_tc(SYSCTL_HANDLER_ARGS); int sysctl_tc_params(SYSCTL_HANDLER_ARGS); /* t4_filter.c */ int get_filter_mode(struct adapter *, uint32_t *); int set_filter_mode(struct adapter *, uint32_t); int get_filter(struct adapter *, struct t4_filter *); int set_filter(struct adapter *, struct t4_filter *); int del_filter(struct adapter *, struct t4_filter *); int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *); int t4_hashfilter_ao_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *); int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *); int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *); void free_hftid_hash(struct tid_info *); static inline struct wrqe * alloc_wrqe(int wr_len, struct sge_wrq *wrq) { int len = offsetof(struct wrqe, wr) + wr_len; struct wrqe *wr; wr = malloc(len, M_CXGBE, M_NOWAIT); if (__predict_false(wr == NULL)) return (NULL); wr->wr_len = wr_len; wr->wrq = wrq; return (wr); } static inline void * wrtod(struct wrqe *wr) { return (&wr->wr[0]); } static inline void free_wrqe(struct wrqe *wr) { free(wr, M_CXGBE); } static inline void t4_wrq_tx(struct adapter *sc, struct wrqe *wr) { struct sge_wrq *wrq = wr->wrq; TXQ_LOCK(wrq); t4_wrq_tx_locked(sc, wrq, wr); TXQ_UNLOCK(wrq); } static inline int read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, int len) { return (rw_via_memwin(sc, idx, addr, val, len, 0)); } static inline int write_via_memwin(struct adapter *sc, int idx, uint32_t addr, const uint32_t *val, int len) { return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); } #endif Index: 
stable/11/sys/dev/cxgbe/common/common.h =================================================================== --- stable/11/sys/dev/cxgbe/common/common.h (revision 346966) +++ stable/11/sys/dev/cxgbe/common/common.h (revision 346967) @@ -1,883 +1,885 @@ /*- * Copyright (c) 2011 Chelsio Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef __CHELSIO_COMMON_H #define __CHELSIO_COMMON_H #include "t4_hw.h" enum { MAX_NPORTS = 4, /* max # of ports */ SERNUM_LEN = 24, /* Serial # length */ EC_LEN = 16, /* E/C length */ ID_LEN = 16, /* ID length */ PN_LEN = 16, /* Part Number length */ MD_LEN = 16, /* MFG diags version length */ MACADDR_LEN = 12, /* MAC Address length */ }; enum { T4_REGMAP_SIZE = (160 * 1024), T5_REGMAP_SIZE = (332 * 1024), }; enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST }; enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR }; enum { PAUSE_RX = 1 << 0, PAUSE_TX = 1 << 1, PAUSE_AUTONEG = 1 << 2 }; enum { FEC_NONE = 0, FEC_RS = 1 << 0, FEC_BASER_RS = 1 << 1, FEC_AUTO = 1 << 5, /* M_FW_PORT_CAP32_FEC + 1 */ }; enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; struct port_stats { u64 tx_octets; /* total # of octets in good frames */ u64 tx_frames; /* all good frames */ u64 tx_bcast_frames; /* all broadcast frames */ u64 tx_mcast_frames; /* all multicast frames */ u64 tx_ucast_frames; /* all unicast frames */ u64 tx_error_frames; /* all error frames */ u64 tx_frames_64; /* # of Tx frames in a particular range */ u64 tx_frames_65_127; u64 tx_frames_128_255; u64 tx_frames_256_511; u64 tx_frames_512_1023; u64 tx_frames_1024_1518; u64 tx_frames_1519_max; u64 tx_drop; /* # of dropped Tx frames */ u64 tx_pause; /* # of transmitted pause frames */ u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ u64 rx_octets; /* total # of octets in good frames */ u64 rx_frames; 
/* all good frames */ u64 rx_bcast_frames; /* all broadcast frames */ u64 rx_mcast_frames; /* all multicast frames */ u64 rx_ucast_frames; /* all unicast frames */ u64 rx_too_long; /* # of frames exceeding MTU */ u64 rx_jabber; /* # of jabber frames */ u64 rx_fcs_err; /* # of received frames with bad FCS */ u64 rx_len_err; /* # of received frames with length error */ u64 rx_symbol_err; /* symbol errors */ u64 rx_runt; /* # of short frames */ u64 rx_frames_64; /* # of Rx frames in a particular range */ u64 rx_frames_65_127; u64 rx_frames_128_255; u64 rx_frames_256_511; u64 rx_frames_512_1023; u64 rx_frames_1024_1518; u64 rx_frames_1519_max; u64 rx_pause; /* # of received pause frames */ u64 rx_ppp0; /* # of received PPP prio 0 frames */ u64 rx_ppp1; /* # of received PPP prio 1 frames */ u64 rx_ppp2; /* # of received PPP prio 2 frames */ u64 rx_ppp3; /* # of received PPP prio 3 frames */ u64 rx_ppp4; /* # of received PPP prio 4 frames */ u64 rx_ppp5; /* # of received PPP prio 5 frames */ u64 rx_ppp6; /* # of received PPP prio 6 frames */ u64 rx_ppp7; /* # of received PPP prio 7 frames */ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ u64 rx_trunc0; /* buffer-group 0 truncated packets */ u64 rx_trunc1; /* buffer-group 1 truncated packets */ u64 rx_trunc2; /* buffer-group 2 truncated packets */ u64 rx_trunc3; /* buffer-group 3 truncated packets */ }; struct lb_port_stats { u64 octets; u64 frames; u64 bcast_frames; u64 mcast_frames; u64 ucast_frames; u64 error_frames; u64 frames_64; u64 frames_65_127; u64 frames_128_255; u64 frames_256_511; u64 frames_512_1023; u64 frames_1024_1518; u64 frames_1519_max; u64 drop; u64 ovflow0; u64 ovflow1; u64 ovflow2; u64 ovflow3; u64 trunc0; u64 trunc1; u64 trunc2; u64 trunc3; }; struct tp_tcp_stats { u32 tcp_out_rsts; u64 tcp_in_segs; u64 tcp_out_segs; u64 tcp_retrans_segs; }; struct tp_usm_stats { u32 frames; u32 drops; u64 octets; }; struct tp_fcoe_stats { u32 frames_ddp; u32 frames_drop; u64 octets_ddp; }; struct tp_err_stats { u32 mac_in_errs[MAX_NCHAN]; u32 hdr_in_errs[MAX_NCHAN]; u32 tcp_in_errs[MAX_NCHAN]; u32 tnl_cong_drops[MAX_NCHAN]; u32 ofld_chan_drops[MAX_NCHAN]; u32 tnl_tx_drops[MAX_NCHAN]; u32 ofld_vlan_drops[MAX_NCHAN]; u32 tcp6_in_errs[MAX_NCHAN]; u32 ofld_no_neigh; u32 ofld_cong_defer; }; struct tp_proxy_stats { u32 proxy[MAX_NCHAN]; }; struct tp_cpl_stats { u32 req[MAX_NCHAN]; u32 rsp[MAX_NCHAN]; }; struct tp_rdma_stats { u32 rqe_dfr_pkt; u32 rqe_dfr_mod; }; struct sge_params { int timer_val[SGE_NTIMERS]; /* final, scaled values */ int counter_val[SGE_NCOUNTERS]; int fl_starve_threshold; int fl_starve_threshold2; int page_shift; int eq_s_qpp; int iq_s_qpp; int spg_len; int pad_boundary; int pack_boundary; int fl_pktshift; u32 sge_control; u32 sge_fl_buffer_size[SGE_FLBUF_SIZES]; }; struct tp_params { unsigned int tre; /* log2 of core clocks per TP tick */ unsigned int dack_re; /* DACK timer resolution */ unsigned int la_mask; /* what events are recorded by TP LA */ unsigned short tx_modq[MAX_NCHAN]; /* channel to modulation queue map */ uint32_t vlan_pri_map; uint32_t ingress_config; uint64_t hash_filter_mask; __be16 err_vec_mask; int8_t fcoe_shift; int8_t port_shift; int8_t vnic_shift; int8_t vlan_shift; int8_t tos_shift; int8_t protocol_shift; int8_t ethertype_shift; int8_t macmatch_shift; int8_t matchtype_shift; int8_t frag_shift; }; struct 
vpd_params { unsigned int cclk; u8 ec[EC_LEN + 1]; u8 sn[SERNUM_LEN + 1]; u8 id[ID_LEN + 1]; u8 pn[PN_LEN + 1]; u8 na[MACADDR_LEN + 1]; u8 md[MD_LEN + 1]; }; struct pci_params { unsigned int vpd_cap_addr; unsigned int mps; unsigned short speed; unsigned short width; }; /* * Firmware device log. */ struct devlog_params { u32 memtype; /* which memory (FW_MEMTYPE_* ) */ u32 start; /* start of log in firmware memory */ u32 size; /* size of log */ u32 addr; /* start address in flat addr space */ }; /* Stores chip specific parameters */ struct chip_params { u8 nchan; u8 pm_stats_cnt; u8 cng_ch_bits_log; /* congestion channel map bits width */ u8 nsched_cls; u8 cim_num_obq; u16 mps_rplc_size; u16 vfcount; u32 sge_fl_db; u16 mps_tcam_size; }; /* VF-only parameters. */ /* * Global Receive Side Scaling (RSS) parameters in host-native format. */ struct rss_params { unsigned int mode; /* RSS mode */ union { struct { u_int synmapen:1; /* SYN Map Enable */ u_int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ u_int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ u_int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ u_int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ u_int ofdmapen:1; /* Offload Map Enable */ u_int tnlmapen:1; /* Tunnel Map Enable */ u_int tnlalllookup:1; /* Tunnel All Lookup */ u_int hashtoeplitz:1; /* use Toeplitz hash */ } basicvirtual; } u; }; /* * Maximum resources provisioned for a PCI VF. */ struct vf_resources { unsigned int nvi; /* N virtual interfaces */ unsigned int neq; /* N egress Qs */ unsigned int nethctrl; /* N egress ETH or CTRL Qs */ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ unsigned int niq; /* N ingress Qs */ unsigned int tc; /* PCI-E traffic class */ unsigned int pmask; /* port access rights mask */ unsigned int nexactf; /* N exact MPS filters */ unsigned int r_caps; /* read capabilities */ unsigned int wx_caps; /* write/execute capabilities */ }; struct adapter_params { struct sge_params sge; struct tp_params tp; /* PF-only */ struct vpd_params vpd; struct pci_params pci; struct devlog_params devlog; /* PF-only */ struct rss_params rss; /* VF-only */ struct vf_resources vfres; /* VF-only */ unsigned int core_vdd; unsigned int sf_size; /* serial flash size in bytes */ unsigned int sf_nsec; /* # of flash sectors */ unsigned int fw_vers; /* firmware version */ unsigned int bs_vers; /* bootstrap version */ unsigned int tp_vers; /* TP microcode version */ unsigned int er_vers; /* expansion ROM version */ unsigned int scfg_vers; /* Serial Configuration version */ unsigned int vpd_vers; /* VPD version */ unsigned short mtus[NMTUS]; unsigned short a_wnd[NCCTRL_WIN]; unsigned short b_wnd[NCCTRL_WIN]; unsigned int cim_la_size; uint8_t nports; /* # of ethernet ports */ uint8_t portvec; unsigned int chipid:4; /* chip ID. T4 = 4, T5 = 5, ... */ unsigned int rev:4; /* chip revision */ unsigned int fpga:1; /* this is an FPGA */ unsigned int offload:1; /* hw is TOE capable, fw has divvied up card resources for TOE operation. 
*/ unsigned int bypass:1; /* this is a bypass card */ unsigned int ethoffload:1; unsigned int port_caps32:1; unsigned int hash_filter:1; unsigned int filter2_wr_support:1; unsigned int ofldq_wr_cred; unsigned int eo_wr_cred; unsigned int max_ordird_qp; unsigned int max_ird_adapter; uint32_t mps_bg_map; /* rx buffer group map for all ports (up to 4) */ - bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ - bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ + bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ + bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ + bool viid_smt_extn_support; /* FW returns vin, vfvld & smt index? */ }; #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 #define CHELSIO_T6 0x6 /* * State needed to monitor the forward progress of SGE Ingress DMA activities * and possible hangs. */ struct sge_idma_monitor_state { unsigned int idma_1s_thresh; /* 1s threshold in Core Clock ticks */ unsigned int idma_stalled[2]; /* synthesized stalled timers in HZ */ unsigned int idma_state[2]; /* IDMA Hang detect state */ unsigned int idma_qid[2]; /* IDMA Hung Ingress Queue ID */ unsigned int idma_warn[2]; /* time to warning in HZ */ }; struct trace_params { u32 data[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4]; unsigned short snap_len; unsigned short min_len; unsigned char skip_ofst; unsigned char skip_len; unsigned char invert; unsigned char port; }; struct link_config { /* OS-specific code owns all the requested_* fields. */ int8_t requested_aneg; /* link autonegotiation */ int8_t requested_fc; /* flow control */ int8_t requested_fec; /* FEC */ u_int requested_speed; /* speed (Mbps) */ uint32_t supported; /* link capabilities */ uint32_t advertising; /* advertised capabilities */ uint32_t lp_advertising; /* peer advertised capabilities */ uint32_t fec_hint; /* use this fec */ u_int speed; /* actual link speed (Mbps) */ int8_t fc; /* actual link flow control */ int8_t fec; /* actual FEC */ bool link_ok; /* link up? 
*/ uint8_t link_down_rc; /* link down reason */ }; #include "adapter.h" #ifndef PCI_VENDOR_ID_CHELSIO # define PCI_VENDOR_ID_CHELSIO 0x1425 #endif #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; ++iter) static inline int is_ftid(const struct adapter *sc, u_int tid) { return (sc->tids.nftids > 0 && tid >= sc->tids.ftid_base && tid <= sc->tids.ftid_end); } static inline int is_hpftid(const struct adapter *sc, u_int tid) { return (sc->tids.nhpftids > 0 && tid >= sc->tids.hpftid_base && tid <= sc->tids.hpftid_end); } static inline int is_etid(const struct adapter *sc, u_int tid) { return (sc->tids.netids > 0 && tid >= sc->tids.etid_base && tid <= sc->tids.etid_end); } static inline int is_offload(const struct adapter *adap) { return adap->params.offload; } static inline int is_ethoffload(const struct adapter *adap) { return adap->params.ethoffload; } static inline int is_hashfilter(const struct adapter *adap) { return adap->params.hash_filter; } static inline int chip_id(struct adapter *adap) { return adap->params.chipid; } static inline int chip_rev(struct adapter *adap) { return adap->params.rev; } static inline int is_t4(struct adapter *adap) { return adap->params.chipid == CHELSIO_T4; } static inline int is_t5(struct adapter *adap) { return adap->params.chipid == CHELSIO_T5; } static inline int is_t6(struct adapter *adap) { return adap->params.chipid == CHELSIO_T6; } static inline int is_fpga(struct adapter *adap) { return adap->params.fpga; } static inline unsigned int core_ticks_per_usec(const struct adapter *adap) { return adap->params.vpd.cclk / 1000; } static inline unsigned int us_to_core_ticks(const struct adapter *adap, unsigned int us) { return (us * adap->params.vpd.cclk) / 1000; } static inline unsigned int core_ticks_to_us(const struct adapter *adapter, unsigned int ticks) { /* add Core Clock / 2 to round ticks to nearest uS */ return ((ticks * 1000 + adapter->params.vpd.cclk/2) / adapter->params.vpd.cclk); } static inline unsigned int dack_ticks_to_usec(const struct adapter *adap, unsigned int ticks) { return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap); } static inline u_int us_to_tcp_ticks(const struct adapter *adap, u_long us) { return (us * adap->params.vpd.cclk / 1000 >> adap->params.tp.tre); } static inline u_int tcp_ticks_to_us(const struct adapter *adap, u_int ticks) { return ((uint64_t)ticks << adap->params.tp.tre) / core_ticks_per_usec(adap); } void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, u32 val); int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, bool sleep_ok, int timeout); int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, bool sleep_ok); static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, int timeout) { return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true, timeout); } static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl) { return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); } static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl) { return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); } void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx); void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 
*vals, unsigned int nregs, unsigned int start_idx); u32 t4_hw_pci_read_cfg4(adapter_t *adapter, int reg); struct fw_filter_wr; void t4_intr_enable(struct adapter *adapter); void t4_intr_disable(struct adapter *adapter); void t4_intr_clear(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter, bool verbose); int t4_hash_mac_addr(const u8 *addr); int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc); int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data); int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data); int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz); int t4_seeprom_wp(struct adapter *adapter, int enable); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n, const u8 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op); int t5_fw_init_extern_mem(struct adapter *adap); int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_load_boot(struct adapter *adap, u8 *boot_data, unsigned int boot_addr, unsigned int size); int t4_flash_erase_sectors(struct adapter *adapter, int start, int end); int t4_flash_cfg_addr(struct adapter *adapter); int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr); int t4_get_bs_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); int t4_get_exprom_version(struct adapter *adapter, u32 *vers); int t4_get_scfg_version(struct adapter *adapter, u32 *vers); int t4_get_vpd_version(struct adapter *adapter, u32 *vers); int t4_get_version_info(struct adapter *adapter); int t4_init_hw(struct adapter *adapter, u32 fw_params); const struct chip_params *t4_get_chip_params(int chipid); int t4_prep_adapter(struct adapter *adapter, u32 *buf); int t4_shutdown_adapter(struct adapter *adapter); int t4_init_devlog_params(struct adapter *adapter, int fw_attach); int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap, bool sleep_ok); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id); void t4_fatal_err(struct adapter *adapter, bool fw_error); int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, int filter_index, int enable); void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, int filter_index, int *enabled); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq); int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, unsigned int flags); int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, unsigned int flags, unsigned int defq, unsigned int skeyidx, unsigned int skey); int t4_read_rss(struct adapter *adapter, u16 *entries); void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok); void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx, bool sleep_ok); void 
t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp, bool sleep_ok); void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val, bool sleep_ok); void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, u32 *vfl, u32 *vfh, bool sleep_ok); void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, u32 vfl, u32 vfh, bool sleep_ok); u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok); void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok); u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok); void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok); int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask); void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres); int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n); int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n); int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp); int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, const unsigned int *valp); int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp); int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr); void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr); void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp); int t4_get_flash_params(struct adapter *adapter); u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach); int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity); int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity); int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 size, __be32 *data); void t4_idma_monitor_init(struct adapter *adapter, struct sge_idma_monitor_state *idma); void t4_idma_monitor(struct adapter *adapter, struct sge_idma_monitor_state *idma, int hz, int ticks); unsigned int t4_get_regs_len(struct adapter *adapter); void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size); const char *t4_get_port_type_description(enum fw_port_type port_type); void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); void t4_get_port_stats_offset(struct adapter *adap, int idx, struct port_stats *stats, struct port_stats *offset); void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); void t4_clr_port_stats(struct adapter *adap, int idx); void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]); void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]); void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, unsigned int *ipg, bool sleep_ok); void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val); void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr); void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st, bool sleep_ok); void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st, bool sleep_ok); void t4_tp_get_cpl_stats(struct adapter 
*adap, struct tp_cpl_stats *st, bool sleep_ok); void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st, bool sleep_ok); void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st, bool sleep_ok); void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6, bool sleep_ok); void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, struct tp_fcoe_stats *st, bool sleep_ok); void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned short *beta); void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf); int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps); int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg); int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals, unsigned int start, unsigned int n); void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate); int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map, bool sleep_ok); void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); void t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr); int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, u64 mask0, u64 mask1, unsigned int crc, bool enable); int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state); int t4_fw_bye(struct adapter *adap, unsigned int mbox); int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force); int t4_fw_restart(struct adapter *adap, unsigned int mbox); int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const u8 *fw_data, unsigned int size, int force); int t4_fw_initialize(struct adapter *adap, unsigned int mbox); int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val); int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val, int rw); int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val, int timeout); int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val); int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi, unsigned int cmask, unsigned int pmask, unsigned int exactf, unsigned int rcaps, unsigned int wxcaps); int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, u16 *rss_size, + uint8_t *vfvld, uint16_t *vin, unsigned int portfunc, unsigned int idstype); int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, - u16 *rss_size); + u16 *rss_size, uint8_t *vfvld, uint16_t *vin); int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int viid); int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok); int 
t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, - int idx, const u8 *addr, bool persist, bool add_smt); + int idx, const u8 *addr, bool persist, uint16_t *smt_idx); int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, bool ucast, u64 vec, bool sleep_ok); int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en); int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int nblinks); int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, unsigned int *valp); int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, unsigned int val); int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf); int t4_i2c_wr(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf); int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id); int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id); int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, enum ctxt_type ctype, u32 *data); int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, u32 *data); int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); const char *t4_link_down_rc_str(unsigned char link_down_rc); int t4_update_port_info(struct port_info *pi); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); int t4_sched_config(struct adapter *adapter, int type, int minmaxen, int sleep_ok); int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int cl, int minrate, int maxrate, int weight, int pktsize, int burstsize, int sleep_ok); int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode, unsigned int maxrate, int sleep_ok); int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl, int weight, int sleep_ok); int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl, int mode, unsigned int maxrate, int pktsize, int sleep_ok); int t4_config_watchdog(struct adapter *adapter, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int timeout, unsigned int action); int t4_get_devlog_level(struct adapter *adapter, unsigned int *level); int t4_set_devlog_level(struct adapter *adapter, unsigned int level); void t4_sge_decode_idma_state(struct adapter *adapter, int state); 
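/*
 * Illustrative sketch (editor's annotation, not part of this change): with
 * the updated prototypes above, t4_alloc_vi() returns the viid on success
 * and now also reports the VI's vfvld/vin values, while t4_change_mac()
 * reports the SMT index through an out parameter instead of taking a
 * bool add_smt:
 *
 *	uint8_t vfvld;
 *	uint16_t vin, smt_idx;
 *	u8 mac[ETHER_ADDR_LEN];
 *	u16 rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(sc, sc->mbox, port, sc->pf, 0, 1, mac,
 *	    &rss_size, &vfvld, &vin);
 *	if (viid > 0)
 *		(void)t4_change_mac(sc, sc->mbox, viid, -1, mac, true,
 *		    &smt_idx);
 */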
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); static inline int t4vf_query_params(struct adapter *adapter, unsigned int nparams, const u32 *params, u32 *vals) { return t4_query_params(adapter, 0, 0, 0, nparams, params, vals); } static inline int t4vf_set_params(struct adapter *adapter, unsigned int nparams, const u32 *params, const u32 *vals) { return t4_set_params(adapter, 0, 0, 0, nparams, params, vals); } static inline int t4vf_wr_mbox(struct adapter *adap, const void *cmd, int size, void *rpl) { return t4_wr_mbox(adap, adap->mbox, cmd, size, rpl); } int t4vf_wait_dev_ready(struct adapter *adapter); int t4vf_fw_reset(struct adapter *adapter); int t4vf_get_sge_params(struct adapter *adapter); int t4vf_get_rss_glb_config(struct adapter *adapter); int t4vf_get_vfres(struct adapter *adapter); int t4vf_prep_adapter(struct adapter *adapter); int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, enum t4_bar2_qtype qtype, int user, u64 *pbar2_qoffset, unsigned int *pbar2_qid); unsigned int fwcap_to_speed(uint32_t caps); uint32_t speed_to_fwcap(unsigned int speed); uint32_t fwcap_top_speed(uint32_t caps); static inline int port_top_speed(const struct port_info *pi) { /* Mbps -> Gbps */ return (fwcap_to_speed(pi->link_cfg.supported) / 1000); } #endif /* __CHELSIO_COMMON_H */ Index: stable/11/sys/dev/cxgbe/common/t4_hw.c =================================================================== --- stable/11/sys/dev/cxgbe/common/t4_hw.c (revision 346966) +++ stable/11/sys/dev/cxgbe/common/t4_hw.c (revision 346967) @@ -1,10760 +1,10779 @@ /*- * Copyright (c) 2012, 2016 Chelsio Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include #include #include "common.h" #include "t4_regs.h" #include "t4_regs_values.h" #include "firmware/t4fw_interface.h" #undef msleep #define msleep(x) do { \ if (cold) \ DELAY((x) * 1000); \ else \ pause("t4hw", (x) * hz / 1000); \ } while (0) /** * t4_wait_op_done_val - wait until an operation is completed * @adapter: the adapter performing the operation * @reg: the register to check for completion * @mask: a single-bit field within @reg that indicates completion * @polarity: the value of the field when the operation is completed * @attempts: number of check iterations * @delay: delay in usecs between iterations * @valp: where to store the value of the register at completion time * * Wait until an operation is completed by checking a bit in a register * up to @attempts times. If @valp is not NULL the value of the register * at the time it indicated completion is stored there. Returns 0 if the * operation completes and -EAGAIN otherwise. */ static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, int polarity, int attempts, int delay, u32 *valp) { while (1) { u32 val = t4_read_reg(adapter, reg); if (!!(val & mask) == polarity) { if (valp) *valp = val; return 0; } if (--attempts == 0) return -EAGAIN; if (delay) udelay(delay); } } static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, int polarity, int attempts, int delay) { return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, delay, NULL); } /** * t4_set_reg_field - set a register field to a value * @adapter: the adapter to program * @addr: the register address * @mask: specifies the portion of the register to modify * @val: the new value for the register field * * Sets a register field specified by the supplied mask to the * given value. */ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, u32 val) { u32 v = t4_read_reg(adapter, addr) & ~mask; t4_write_reg(adapter, addr, v | val); (void) t4_read_reg(adapter, addr); /* flush */ } /** * t4_read_indirect - read indirectly addressed registers * @adap: the adapter * @addr_reg: register holding the indirect address * @data_reg: register holding the value of the indirect register * @vals: where the read register values are stored * @nregs: how many indirect registers to read * @start_idx: index of first indirect register to read * * Reads registers that are accessed indirectly through an address/data * register pair. */ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx) { while (nregs--) { t4_write_reg(adap, addr_reg, start_idx); *vals++ = t4_read_reg(adap, data_reg); start_idx++; } } /** * t4_write_indirect - write indirectly addressed registers * @adap: the adapter * @addr_reg: register holding the indirect addresses * @data_reg: register holding the value for the indirect registers * @vals: values to write * @nregs: how many indirect registers to write * @start_idx: address of first indirect register to write * * Writes a sequential block of registers that are accessed indirectly * through an address/data register pair. 
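 * For example (editor's illustration, not part of this change), the TP PIO
 * registers are written through the A_TP_PIO_ADDR/A_TP_PIO_DATA pair:
 *
 *	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals,
 *	    nregs, start_index);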
*/ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx) { while (nregs--) { t4_write_reg(adap, addr_reg, start_idx++); t4_write_reg(adap, data_reg, *vals++); } } /* * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor * mechanism. This guarantees that we get the real value even if we're * operating within a Virtual Machine and the Hypervisor is trapping our * Configuration Space accesses. * * N.B. This routine should only be used as a last resort: the firmware uses * the backdoor registers on a regular basis and we can end up * conflicting with its uses! */ u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg) { u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg); u32 val; if (chip_id(adap) <= CHELSIO_T5) req |= F_ENABLE; else req |= F_T6_ENABLE; if (is_t4(adap)) req |= F_LOCALCFG; t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req); val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA); /* * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a * Configuration Space read. (None of the other fields matter when * F_ENABLE is 0 so a simple register write is easier than a * read-modify-write via t4_set_reg_field().) */ t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0); return val; } /* * t4_report_fw_error - report firmware error * @adap: the adapter * * The adapter firmware can indicate error conditions to the host. * If the firmware has indicated an error, print out the reason for * the firmware error. */ static void t4_report_fw_error(struct adapter *adap) { static const char *const reason[] = { "Crash", /* PCIE_FW_EVAL_CRASH */ "During Device Preparation", /* PCIE_FW_EVAL_PREP */ "During Device Configuration", /* PCIE_FW_EVAL_CONF */ "During Device Initialization", /* PCIE_FW_EVAL_INIT */ "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ "Reserved", /* reserved */ }; u32 pcie_fw; pcie_fw = t4_read_reg(adap, A_PCIE_FW); if (pcie_fw & F_PCIE_FW_ERR) { adap->flags &= ~FW_OK; CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n", reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw); if (pcie_fw != 0xffffffff) t4_os_dump_devlog(adap); } } /* * Get the reply to a mailbox command and store it in @rpl in big-endian order. */ static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, u32 mbox_addr) { for ( ; nflit; nflit--, mbox_addr += 8) *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); } /* * Handle a FW assertion reported in a mailbox.
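 * The assert payload carries the firmware source file name, the line
 * number, and two diagnostic values (val0/val1); fw_asrt() below prints
 * them via CH_ALERT().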
*/ static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt) { CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", asrt->u.assert.filename_0_7, be32_to_cpu(asrt->u.assert.line), be32_to_cpu(asrt->u.assert.x), be32_to_cpu(asrt->u.assert.y)); } struct port_tx_state { uint64_t rx_pause; uint64_t tx_frames; }; static void read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state) { uint32_t rx_pause_reg, tx_frames_reg; if (is_t4(sc)) { tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L); rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L); } else { tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L); rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L); } tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg); tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg); } static void read_tx_state(struct adapter *sc, struct port_tx_state *tx_state) { int i; for_each_port(sc, i) read_tx_state_one(sc, i, &tx_state[i]); } static void check_tx_state(struct adapter *sc, struct port_tx_state *tx_state) { uint32_t port_ctl_reg; uint64_t tx_frames, rx_pause; int i; for_each_port(sc, i) { rx_pause = tx_state[i].rx_pause; tx_frames = tx_state[i].tx_frames; read_tx_state_one(sc, i, &tx_state[i]); /* update */ if (is_t4(sc)) port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL); else port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL); if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN && rx_pause != tx_state[i].rx_pause && tx_frames == tx_state[i].tx_frames) { t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0); mdelay(1); t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN); } } } #define X_CIM_PF_NOACCESS 0xeeeeeeee /** * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox * @adap: the adapter * @mbox: index of the mailbox to use * @cmd: the command to write * @size: command length in bytes * @rpl: where to optionally store the reply * @sleep_ok: if true we may sleep while awaiting command completion * @timeout: time to wait for command to finish before timing out * (negative implies @sleep_ok=false) * * Sends the given command to FW through the selected mailbox and waits * for the FW to execute the command. If @rpl is not %NULL it is used to * store the FW's reply to the command. The command and its optional * reply are of the same length. Some FW commands like RESET and * INITIALIZE can take a considerable amount of time to execute. * @sleep_ok determines whether we may sleep while awaiting the response. * If sleeping is allowed we use progressive backoff otherwise we spin. * Note that passing in a negative @timeout is an alternate mechanism * for specifying @sleep_ok=false. This is useful when a higher level * interface allows for specification of @timeout but not @sleep_ok ... * * The return value is 0 on success or a negative errno on failure. A * failure can happen either because we are not able to execute the * command or FW executes it but signals an error. In the latter case * the return value is the error code indicated by FW (negated). */ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, bool sleep_ok, int timeout) { /* * We delay in small increments at first in an effort to maintain * responsiveness for simple, fast executing commands but then back * off to larger delays to a maximum retry delay. 
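 * (With the table below, a sleeping caller waits 1, 1, 3, 5, 10, 10, 20,
 * 50, and then 100 ms between successive polls; the last entry repeats
 * until @timeout expires.)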
*/ static const int delay[] = { 1, 1, 3, 5, 10, 10, 20, 50, 100 }; u32 v; u64 res; int i, ms, delay_idx, ret, next_tx_check; u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA); u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL); u32 ctl; __be64 cmd_rpl[MBOX_LEN/8]; u32 pcie_fw; struct port_tx_state tx_state[MAX_NPORTS]; if (adap->flags & CHK_MBOX_ACCESS) ASSERT_SYNCHRONIZED_OP(adap); if (size <= 0 || (size & 15) || size > MBOX_LEN) return -EINVAL; if (adap->flags & IS_VF) { if (is_t6(adap)) data_reg = FW_T6VF_MBDATA_BASE_ADDR; else data_reg = FW_T4VF_MBDATA_BASE_ADDR; ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL); } /* * If we have a negative timeout, that implies that we can't sleep. */ if (timeout < 0) { sleep_ok = false; timeout = -timeout; } /* * Attempt to gain access to the mailbox. */ for (i = 0; i < 4; i++) { ctl = t4_read_reg(adap, ctl_reg); v = G_MBOWNER(ctl); if (v != X_MBOWNER_NONE) break; } /* * If we were unable to gain access, report the error to our caller. */ if (v != X_MBOWNER_PL) { t4_report_fw_error(adap); ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT; return ret; } /* * If we gain ownership of the mailbox and there's a "valid" message * in it, this is likely an asynchronous error message from the * firmware. So we'll report that and then proceed on with attempting * to issue our own command ... which may well fail if the error * presaged the firmware crashing ... */ if (ctl & F_MBMSGVALID) { CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true); } /* * Copy in the new mailbox command and send it on its way ... */ memset(cmd_rpl, 0, sizeof(cmd_rpl)); memcpy(cmd_rpl, cmd, size); CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false); for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++) t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i])); if (adap->flags & IS_VF) { /* * For the VFs, the Mailbox Data "registers" are * actually backed by T4's "MA" interface rather than * PL Registers (as is the case for the PFs). Because * these are in different coherency domains, the write * to the VF's PL-register-backed Mailbox Control can * race in front of the writes to the MA-backed VF * Mailbox Data "registers". So we need to do a * read-back on at least one byte of the VF Mailbox * Data registers before doing the write to the VF * Mailbox Control register. */ t4_read_reg(adap, data_reg); } t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); read_tx_state(adap, &tx_state[0]); /* also flushes the write_reg */ next_tx_check = 1000; delay_idx = 0; ms = delay[0]; /* * Loop waiting for the reply; bail out if we time out or the firmware * reports an error. */ pcie_fw = 0; for (i = 0; i < timeout; i += ms) { if (!(adap->flags & IS_VF)) { pcie_fw = t4_read_reg(adap, A_PCIE_FW); if (pcie_fw & F_PCIE_FW_ERR) break; } if (i >= next_tx_check) { check_tx_state(adap, &tx_state[0]); next_tx_check = i + 1000; } if (sleep_ok) { ms = delay[delay_idx]; /* last element may repeat */ if (delay_idx < ARRAY_SIZE(delay) - 1) delay_idx++; msleep(ms); } else { mdelay(ms); } v = t4_read_reg(adap, ctl_reg); if (v == X_CIM_PF_NOACCESS) continue; if (G_MBOWNER(v) == X_MBOWNER_PL) { if (!(v & F_MBMSGVALID)) { t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE)); continue; } /* * Retrieve the command reply and release the mailbox. 
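 * A reply whose opcode is FW_DEBUG_CMD is a firmware assertion rather
 * than a normal completion; it is logged via fw_asrt() and reported to
 * the caller as EIO below.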
*/ get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg); CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false); t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE)); res = be64_to_cpu(cmd_rpl[0]); if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) { fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl); res = V_FW_CMD_RETVAL(EIO); } else if (rpl) memcpy(rpl, cmd_rpl, size); return -G_FW_CMD_RETVAL((int)res); } } /* * We timed out waiting for a reply to our mailbox command. Report * the error and also check to see if the firmware reported any * errors ... */ CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n", *(const u8 *)cmd, mbox, pcie_fw); CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true); CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true); if (pcie_fw & F_PCIE_FW_ERR) { ret = -ENXIO; t4_report_fw_error(adap); } else { ret = -ETIMEDOUT; t4_os_dump_devlog(adap); } t4_fatal_err(adap, true); return ret; } int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, bool sleep_ok) { return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok, FW_CMD_MAX_TIMEOUT); } static int t4_edc_err_read(struct adapter *adap, int idx) { u32 edc_ecc_err_addr_reg; u32 edc_bist_status_rdata_reg; if (is_t4(adap)) { CH_WARN(adap, "%s: T4 NOT supported.\n", __func__); return 0; } if (idx != MEM_EDC0 && idx != MEM_EDC1) { CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx); return 0; } edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx); edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx); CH_WARN(adap, "edc%d err addr 0x%x: 0x%x.\n", idx, edc_ecc_err_addr_reg, t4_read_reg(adap, edc_ecc_err_addr_reg)); CH_WARN(adap, "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n", edc_bist_status_rdata_reg, (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg), (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8), (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16), (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24), (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32), (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40), (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48), (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56), (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64)); return 0; } /** * t4_mc_read - read from MC through backdoor accesses * @adap: the adapter * @idx: which MC to access * @addr: address of first byte requested * @data: 64 bytes of data containing the requested address * @ecc: where to store the corresponding 64-bit ECC word * * Read 64 bytes of data from MC starting at a 64-byte-aligned address * that covers the requested address @addr. If @ecc is not %NULL it * is assigned the 64-bit ECC word for the read data.
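 * Illustrative call (editor's sketch, not part of this change):
 *
 *	__be32 data[16];
 *	u64 ecc;
 *
 *	ret = t4_mc_read(adap, 0, addr & ~0x3fU, data, &ecc);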
*/ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) { int i; u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg; u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg; if (is_t4(adap)) { mc_bist_cmd_reg = A_MC_BIST_CMD; mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR; mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN; mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA; mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN; } else { mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx); mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx); mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx); mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA, idx); mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN, idx); } if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST) return -EBUSY; t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU); t4_write_reg(adap, mc_bist_cmd_len_reg, 64); t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc); t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) | F_START_BIST | V_BIST_CMD_GAP(1)); i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1); if (i) return i; #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i) for (i = 15; i >= 0; i--) *data++ = ntohl(t4_read_reg(adap, MC_DATA(i))); if (ecc) *ecc = t4_read_reg64(adap, MC_DATA(16)); #undef MC_DATA return 0; } /** * t4_edc_read - read from EDC through backdoor accesses * @adap: the adapter * @idx: which EDC to access * @addr: address of first byte requested * @data: 64 bytes of data containing the requested address * @ecc: where to store the corresponding 64-bit ECC word * * Read 64 bytes of data from EDC starting at a 64-byte-aligned address * that covers the requested address @addr. If @ecc is not %NULL it * is assigned the 64-bit ECC word for the read data. */ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) { int i; u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg; u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg; if (is_t4(adap)) { edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx); edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx); edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx); edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN, idx); edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA, idx); } else { /* * These macros are missing from the t4_regs.h file. * Added temporarily for testing.
*/ #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx); edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx); edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx); edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN, idx); edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA, idx); #undef EDC_REG_T5 #undef EDC_STRIDE_T5 } if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST) return -EBUSY; t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU); t4_write_reg(adap, edc_bist_cmd_len_reg, 64); t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); t4_write_reg(adap, edc_bist_cmd_reg, V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST); i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1); if (i) return i; #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i) for (i = 15; i >= 0; i--) *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i))); if (ecc) *ecc = t4_read_reg64(adap, EDC_DATA(16)); #undef EDC_DATA return 0; } /** * t4_mem_read - read EDC 0, EDC 1 or MC into buffer * @adap: the adapter * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC * @addr: address within indicated memory type * @len: amount of memory to read * @buf: host memory buffer * * Reads an [almost] arbitrary memory region in the firmware: the * firmware memory address, length and host buffer must be aligned on * 32-bit boundaries. The memory is returned as a raw byte sequence from * the firmware's memory. If this memory contains data structures which * contain multi-byte integers, it is the caller's responsibility to * perform appropriate byte order conversions. */ int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf) { u32 pos, start, end, offset; int ret; /* * Argument sanity checks ... */ if ((addr & 0x3) || (len & 0x3)) return -EINVAL; /* * The underlying EDC/MC read routines read 64 bytes at a time so we * need to round down the start and round up the end. We'll start * copying out of the first line at (addr - start) a word at a time. */ start = rounddown2(addr, 64); end = roundup2(addr + len, 64); offset = (addr - start)/sizeof(__be32); for (pos = start; pos < end; pos += 64, offset = 0) { __be32 data[16]; /* * Read the chip's memory block and bail if there's an error. */ if ((mtype == MEM_MC) || (mtype == MEM_MC1)) ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL); else ret = t4_edc_read(adap, mtype, pos, data, NULL); if (ret) return ret; /* * Copy the data into the caller's memory buffer. */ while (offset < 16 && len > 0) { *buf++ = data[offset++]; len -= sizeof(__be32); } } return 0; } /* * Return the specified PCI-E Configuration Space register from our Physical * Function. We try first via a Firmware LDST Command (if fw_attach != 0) * since we prefer to let the firmware own all of these registers, but if that * fails we go for it directly ourselves. */ u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach) { /* * If fw_attach != 0, construct and send the Firmware LDST Command to * retrieve the specified PCI-E Configuration Space register.
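 * (Editor's note: callers that cannot rely on the firmware pass 0 for
 * drv_fw_attach, which skips the LDST attempt and reads through the
 * t4_hw_pci_read_cfg4() backdoor directly.)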
*/ if (drv_fw_attach != 0) { struct fw_ldst_cmd ldst_cmd; int ret; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE)); ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd)); ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1); ldst_cmd.u.pcie.ctrl_to_fn = (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf)); ldst_cmd.u.pcie.r = reg; /* * If the LDST Command succeeds, return the result; otherwise * fall through to reading it directly ourselves ... */ ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); if (ret == 0) return be32_to_cpu(ldst_cmd.u.pcie.data[0]); CH_WARN(adap, "Firmware failed to return " "Configuration Space register %d, err = %d\n", reg, -ret); } /* * Read the desired Configuration Space register via the PCI-E * Backdoor mechanism. */ return t4_hw_pci_read_cfg4(adap, reg); } /** * t4_get_regs_len - return the size of the chip's register set * @adapter: the adapter * * Returns the size of the chip's BAR0 register space. */ unsigned int t4_get_regs_len(struct adapter *adapter) { unsigned int chip_version = chip_id(adapter); switch (chip_version) { case CHELSIO_T4: if (adapter->flags & IS_VF) return FW_T4VF_REGMAP_SIZE; return T4_REGMAP_SIZE; case CHELSIO_T5: case CHELSIO_T6: if (adapter->flags & IS_VF) return FW_T4VF_REGMAP_SIZE; return T5_REGMAP_SIZE; } CH_ERR(adapter, "Unsupported chip version %d\n", chip_version); return 0; } /** * t4_get_regs - read chip registers into provided buffer * @adap: the adapter * @buf: register buffer * @buf_size: size (in bytes) of register buffer * * If the provided register buffer isn't large enough for the chip's * full register range, the register dump will be truncated to the * register buffer's size.
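 * Illustrative usage (editor's sketch; the malloc(9) pattern with
 * M_CXGBE is an assumption, not taken from this change):
 *
 *	buf_size = t4_get_regs_len(adap);
 *	buf = malloc(buf_size, M_CXGBE, M_WAITOK | M_ZERO);
 *	t4_get_regs(adap, buf, buf_size);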
*/ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size) { static const unsigned int t4_reg_ranges[] = { 0x1008, 0x1108, 0x1180, 0x1184, 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, 0x11fc, 0x123c, 0x1300, 0x173c, 0x1800, 0x18fc, 0x3000, 0x30d8, 0x30e0, 0x30e4, 0x30ec, 0x5910, 0x5920, 0x5924, 0x5960, 0x5960, 0x5968, 0x5968, 0x5970, 0x5970, 0x5978, 0x5978, 0x5980, 0x5980, 0x5988, 0x5988, 0x5990, 0x5990, 0x5998, 0x5998, 0x59a0, 0x59d4, 0x5a00, 0x5ae0, 0x5ae8, 0x5ae8, 0x5af0, 0x5af0, 0x5af8, 0x5af8, 0x6000, 0x6098, 0x6100, 0x6150, 0x6200, 0x6208, 0x6240, 0x6248, 0x6280, 0x62b0, 0x62c0, 0x6338, 0x6370, 0x638c, 0x6400, 0x643c, 0x6500, 0x6524, 0x6a00, 0x6a04, 0x6a14, 0x6a38, 0x6a60, 0x6a70, 0x6a78, 0x6a78, 0x6b00, 0x6b0c, 0x6b1c, 0x6b84, 0x6bf0, 0x6bf8, 0x6c00, 0x6c0c, 0x6c1c, 0x6c84, 0x6cf0, 0x6cf8, 0x6d00, 0x6d0c, 0x6d1c, 0x6d84, 0x6df0, 0x6df8, 0x6e00, 0x6e0c, 0x6e1c, 0x6e84, 0x6ef0, 0x6ef8, 0x6f00, 0x6f0c, 0x6f1c, 0x6f84, 0x6ff0, 0x6ff8, 0x7000, 0x700c, 0x701c, 0x7084, 0x70f0, 0x70f8, 0x7100, 0x710c, 0x711c, 0x7184, 0x71f0, 0x71f8, 0x7200, 0x720c, 0x721c, 0x7284, 0x72f0, 0x72f8, 0x7300, 0x730c, 0x731c, 0x7384, 0x73f0, 0x73f8, 0x7400, 0x7450, 0x7500, 0x7530, 0x7600, 0x760c, 0x7614, 0x761c, 0x7680, 0x76cc, 0x7700, 0x7798, 0x77c0, 0x77fc, 0x7900, 0x79fc, 0x7b00, 0x7b58, 0x7b60, 0x7b84, 0x7b8c, 0x7c38, 0x7d00, 0x7d38, 0x7d40, 0x7d80, 0x7d8c, 0x7ddc, 0x7de4, 0x7e04, 0x7e10, 0x7e1c, 0x7e24, 0x7e38, 0x7e40, 0x7e44, 0x7e4c, 0x7e78, 0x7e80, 0x7ea4, 0x7eac, 0x7edc, 0x7ee8, 0x7efc, 0x8dc0, 0x8e04, 0x8e10, 0x8e1c, 0x8e30, 0x8e78, 0x8ea0, 0x8eb8, 0x8ec0, 0x8f6c, 0x8fc0, 0x9008, 0x9010, 0x9058, 0x9060, 0x9060, 0x9068, 0x9074, 0x90fc, 0x90fc, 0x9400, 0x9408, 0x9410, 0x9458, 0x9600, 0x9600, 0x9608, 0x9638, 0x9640, 0x96bc, 0x9800, 0x9808, 0x9820, 0x983c, 0x9850, 0x9864, 0x9c00, 0x9c6c, 0x9c80, 0x9cec, 0x9d00, 0x9d6c, 0x9d80, 0x9dec, 0x9e00, 0x9e6c, 0x9e80, 0x9eec, 0x9f00, 0x9f6c, 0x9f80, 0x9fec, 0xd004, 0xd004, 0xd010, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0xea7c, 0xf000, 0x11110, 0x11118, 0x11190, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e4, 0x190f0, 0x190f8, 0x19100, 0x19110, 0x19120, 0x19124, 0x19150, 0x19194, 0x1919c, 0x191b0, 0x191d0, 0x191e8, 0x19238, 0x1924c, 0x193f8, 0x1943c, 0x1944c, 0x19474, 0x19490, 0x194e0, 0x194f0, 0x194f8, 0x19800, 0x19c08, 0x19c10, 0x19c90, 0x19ca0, 0x19ce4, 0x19cf0, 0x19d40, 0x19d50, 0x19d94, 0x19da0, 0x19de8, 0x19df0, 0x19e40, 0x19e50, 0x19e90, 0x19ea0, 0x19f4c, 0x1a000, 0x1a004, 0x1a010, 0x1a06c, 0x1a0b0, 0x1a0e4, 0x1a0ec, 0x1a0f4, 0x1a100, 0x1a108, 0x1a114, 0x1a120, 0x1a128, 0x1a130, 0x1a138, 0x1a138, 0x1a190, 0x1a1c4, 0x1a1fc, 0x1a1fc, 0x1e040, 0x1e04c, 0x1e284, 0x1e28c, 0x1e2c0, 0x1e2c0, 0x1e2e0, 0x1e2e0, 0x1e300, 0x1e384, 0x1e3c0, 0x1e3c8, 0x1e440, 0x1e44c, 0x1e684, 0x1e68c, 0x1e6c0, 0x1e6c0, 0x1e6e0, 0x1e6e0, 0x1e700, 0x1e784, 0x1e7c0, 0x1e7c8, 0x1e840, 0x1e84c, 0x1ea84, 0x1ea8c, 0x1eac0, 0x1eac0, 0x1eae0, 0x1eae0, 0x1eb00, 0x1eb84, 0x1ebc0, 0x1ebc8, 0x1ec40, 0x1ec4c, 0x1ee84, 0x1ee8c, 0x1eec0, 0x1eec0, 0x1eee0, 0x1eee0, 0x1ef00, 0x1ef84, 0x1efc0, 0x1efc8, 0x1f040, 0x1f04c, 0x1f284, 0x1f28c, 0x1f2c0, 0x1f2c0, 0x1f2e0, 0x1f2e0, 0x1f300, 0x1f384, 0x1f3c0, 0x1f3c8, 0x1f440, 0x1f44c, 0x1f684, 0x1f68c, 0x1f6c0, 0x1f6c0, 0x1f6e0, 0x1f6e0, 0x1f700, 0x1f784, 0x1f7c0, 0x1f7c8, 0x1f840, 0x1f84c, 0x1fa84, 0x1fa8c, 0x1fac0, 0x1fac0, 0x1fae0, 0x1fae0, 0x1fb00, 0x1fb84, 0x1fbc0, 0x1fbc8, 0x1fc40, 0x1fc4c, 0x1fe84, 0x1fe8c, 0x1fec0, 0x1fec0, 0x1fee0, 0x1fee0, 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x20000, 0x2002c, 0x20100, 0x2013c, 0x20190, 0x201a0, 0x201a8, 0x201b8, 
0x201c4, 0x201c8, 0x20200, 0x20318, 0x20400, 0x204b4, 0x204c0, 0x20528, 0x20540, 0x20614, 0x21000, 0x21040, 0x2104c, 0x21060, 0x210c0, 0x210ec, 0x21200, 0x21268, 0x21270, 0x21284, 0x212fc, 0x21388, 0x21400, 0x21404, 0x21500, 0x21500, 0x21510, 0x21518, 0x2152c, 0x21530, 0x2153c, 0x2153c, 0x21550, 0x21554, 0x21600, 0x21600, 0x21608, 0x2161c, 0x21624, 0x21628, 0x21630, 0x21634, 0x2163c, 0x2163c, 0x21700, 0x2171c, 0x21780, 0x2178c, 0x21800, 0x21818, 0x21820, 0x21828, 0x21830, 0x21848, 0x21850, 0x21854, 0x21860, 0x21868, 0x21870, 0x21870, 0x21878, 0x21898, 0x218a0, 0x218a8, 0x218b0, 0x218c8, 0x218d0, 0x218d4, 0x218e0, 0x218e8, 0x218f0, 0x218f0, 0x218f8, 0x21a18, 0x21a20, 0x21a28, 0x21a30, 0x21a48, 0x21a50, 0x21a54, 0x21a60, 0x21a68, 0x21a70, 0x21a70, 0x21a78, 0x21a98, 0x21aa0, 0x21aa8, 0x21ab0, 0x21ac8, 0x21ad0, 0x21ad4, 0x21ae0, 0x21ae8, 0x21af0, 0x21af0, 0x21af8, 0x21c18, 0x21c20, 0x21c20, 0x21c28, 0x21c30, 0x21c38, 0x21c38, 0x21c80, 0x21c98, 0x21ca0, 0x21ca8, 0x21cb0, 0x21cc8, 0x21cd0, 0x21cd4, 0x21ce0, 0x21ce8, 0x21cf0, 0x21cf0, 0x21cf8, 0x21d7c, 0x21e00, 0x21e04, 0x22000, 0x2202c, 0x22100, 0x2213c, 0x22190, 0x221a0, 0x221a8, 0x221b8, 0x221c4, 0x221c8, 0x22200, 0x22318, 0x22400, 0x224b4, 0x224c0, 0x22528, 0x22540, 0x22614, 0x23000, 0x23040, 0x2304c, 0x23060, 0x230c0, 0x230ec, 0x23200, 0x23268, 0x23270, 0x23284, 0x232fc, 0x23388, 0x23400, 0x23404, 0x23500, 0x23500, 0x23510, 0x23518, 0x2352c, 0x23530, 0x2353c, 0x2353c, 0x23550, 0x23554, 0x23600, 0x23600, 0x23608, 0x2361c, 0x23624, 0x23628, 0x23630, 0x23634, 0x2363c, 0x2363c, 0x23700, 0x2371c, 0x23780, 0x2378c, 0x23800, 0x23818, 0x23820, 0x23828, 0x23830, 0x23848, 0x23850, 0x23854, 0x23860, 0x23868, 0x23870, 0x23870, 0x23878, 0x23898, 0x238a0, 0x238a8, 0x238b0, 0x238c8, 0x238d0, 0x238d4, 0x238e0, 0x238e8, 0x238f0, 0x238f0, 0x238f8, 0x23a18, 0x23a20, 0x23a28, 0x23a30, 0x23a48, 0x23a50, 0x23a54, 0x23a60, 0x23a68, 0x23a70, 0x23a70, 0x23a78, 0x23a98, 0x23aa0, 0x23aa8, 0x23ab0, 0x23ac8, 0x23ad0, 0x23ad4, 0x23ae0, 0x23ae8, 0x23af0, 0x23af0, 0x23af8, 0x23c18, 0x23c20, 0x23c20, 0x23c28, 0x23c30, 0x23c38, 0x23c38, 0x23c80, 0x23c98, 0x23ca0, 0x23ca8, 0x23cb0, 0x23cc8, 0x23cd0, 0x23cd4, 0x23ce0, 0x23ce8, 0x23cf0, 0x23cf0, 0x23cf8, 0x23d7c, 0x23e00, 0x23e04, 0x24000, 0x2402c, 0x24100, 0x2413c, 0x24190, 0x241a0, 0x241a8, 0x241b8, 0x241c4, 0x241c8, 0x24200, 0x24318, 0x24400, 0x244b4, 0x244c0, 0x24528, 0x24540, 0x24614, 0x25000, 0x25040, 0x2504c, 0x25060, 0x250c0, 0x250ec, 0x25200, 0x25268, 0x25270, 0x25284, 0x252fc, 0x25388, 0x25400, 0x25404, 0x25500, 0x25500, 0x25510, 0x25518, 0x2552c, 0x25530, 0x2553c, 0x2553c, 0x25550, 0x25554, 0x25600, 0x25600, 0x25608, 0x2561c, 0x25624, 0x25628, 0x25630, 0x25634, 0x2563c, 0x2563c, 0x25700, 0x2571c, 0x25780, 0x2578c, 0x25800, 0x25818, 0x25820, 0x25828, 0x25830, 0x25848, 0x25850, 0x25854, 0x25860, 0x25868, 0x25870, 0x25870, 0x25878, 0x25898, 0x258a0, 0x258a8, 0x258b0, 0x258c8, 0x258d0, 0x258d4, 0x258e0, 0x258e8, 0x258f0, 0x258f0, 0x258f8, 0x25a18, 0x25a20, 0x25a28, 0x25a30, 0x25a48, 0x25a50, 0x25a54, 0x25a60, 0x25a68, 0x25a70, 0x25a70, 0x25a78, 0x25a98, 0x25aa0, 0x25aa8, 0x25ab0, 0x25ac8, 0x25ad0, 0x25ad4, 0x25ae0, 0x25ae8, 0x25af0, 0x25af0, 0x25af8, 0x25c18, 0x25c20, 0x25c20, 0x25c28, 0x25c30, 0x25c38, 0x25c38, 0x25c80, 0x25c98, 0x25ca0, 0x25ca8, 0x25cb0, 0x25cc8, 0x25cd0, 0x25cd4, 0x25ce0, 0x25ce8, 0x25cf0, 0x25cf0, 0x25cf8, 0x25d7c, 0x25e00, 0x25e04, 0x26000, 0x2602c, 0x26100, 0x2613c, 0x26190, 0x261a0, 0x261a8, 0x261b8, 0x261c4, 0x261c8, 0x26200, 0x26318, 0x26400, 0x264b4, 0x264c0, 0x26528, 0x26540, 0x26614, 0x27000, 
0x27040, 0x2704c, 0x27060, 0x270c0, 0x270ec, 0x27200, 0x27268, 0x27270, 0x27284, 0x272fc, 0x27388, 0x27400, 0x27404, 0x27500, 0x27500, 0x27510, 0x27518, 0x2752c, 0x27530, 0x2753c, 0x2753c, 0x27550, 0x27554, 0x27600, 0x27600, 0x27608, 0x2761c, 0x27624, 0x27628, 0x27630, 0x27634, 0x2763c, 0x2763c, 0x27700, 0x2771c, 0x27780, 0x2778c, 0x27800, 0x27818, 0x27820, 0x27828, 0x27830, 0x27848, 0x27850, 0x27854, 0x27860, 0x27868, 0x27870, 0x27870, 0x27878, 0x27898, 0x278a0, 0x278a8, 0x278b0, 0x278c8, 0x278d0, 0x278d4, 0x278e0, 0x278e8, 0x278f0, 0x278f0, 0x278f8, 0x27a18, 0x27a20, 0x27a28, 0x27a30, 0x27a48, 0x27a50, 0x27a54, 0x27a60, 0x27a68, 0x27a70, 0x27a70, 0x27a78, 0x27a98, 0x27aa0, 0x27aa8, 0x27ab0, 0x27ac8, 0x27ad0, 0x27ad4, 0x27ae0, 0x27ae8, 0x27af0, 0x27af0, 0x27af8, 0x27c18, 0x27c20, 0x27c20, 0x27c28, 0x27c30, 0x27c38, 0x27c38, 0x27c80, 0x27c98, 0x27ca0, 0x27ca8, 0x27cb0, 0x27cc8, 0x27cd0, 0x27cd4, 0x27ce0, 0x27ce8, 0x27cf0, 0x27cf0, 0x27cf8, 0x27d7c, 0x27e00, 0x27e04, }; static const unsigned int t4vf_reg_ranges[] = { VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), VF_MPS_REG(A_MPS_VF_CTL), VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), FW_T4VF_MBDATA_BASE_ADDR, FW_T4VF_MBDATA_BASE_ADDR + ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), }; static const unsigned int t5_reg_ranges[] = { 0x1008, 0x10c0, 0x10cc, 0x10f8, 0x1100, 0x1100, 0x110c, 0x1148, 0x1180, 0x1184, 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, 0x11fc, 0x123c, 0x1280, 0x173c, 0x1800, 0x18fc, 0x3000, 0x3028, 0x3060, 0x30b0, 0x30b8, 0x30d8, 0x30e0, 0x30fc, 0x3140, 0x357c, 0x35a8, 0x35cc, 0x35ec, 0x35ec, 0x3600, 0x5624, 0x56cc, 0x56ec, 0x56f4, 0x5720, 0x5728, 0x575c, 0x580c, 0x5814, 0x5890, 0x589c, 0x58a4, 0x58ac, 0x58b8, 0x58bc, 0x5940, 0x59c8, 0x59d0, 0x59dc, 0x59fc, 0x5a18, 0x5a60, 0x5a70, 0x5a80, 0x5a9c, 0x5b94, 0x5bfc, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, 0x60a8, 0x614c, 0x7700, 0x7798, 0x77c0, 0x78fc, 0x7b00, 0x7b58, 0x7b60, 0x7b84, 0x7b8c, 0x7c54, 0x7d00, 0x7d38, 0x7d40, 0x7d80, 0x7d8c, 0x7ddc, 0x7de4, 0x7e04, 0x7e10, 0x7e1c, 0x7e24, 0x7e38, 0x7e40, 0x7e44, 0x7e4c, 0x7e78, 0x7e80, 0x7edc, 0x7ee8, 0x7efc, 0x8dc0, 0x8de0, 0x8df8, 0x8e04, 0x8e10, 0x8e84, 0x8ea0, 0x8f84, 0x8fc0, 0x9058, 0x9060, 0x9060, 0x9068, 0x90f8, 0x9400, 0x9408, 0x9410, 0x9470, 0x9600, 0x9600, 0x9608, 0x9638, 0x9640, 0x96f4, 0x9800, 0x9808, 0x9820, 0x983c, 0x9850, 0x9864, 0x9c00, 0x9c6c, 0x9c80, 0x9cec, 0x9d00, 0x9d6c, 0x9d80, 0x9dec, 0x9e00, 0x9e6c, 0x9e80, 0x9eec, 0x9f00, 0x9f6c, 0x9f80, 0xa020, 0xd004, 0xd004, 0xd010, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0x1106c, 0x11074, 0x11088, 0x1109c, 0x1117c, 0x11190, 0x11204, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e8, 0x190f0, 0x190f8, 0x19100, 0x19110, 0x19120, 0x19124, 0x19150, 0x19194, 0x1919c, 0x191b0, 0x191d0, 0x191e8, 0x19238, 0x19290, 0x193f8, 0x19428, 0x19430, 0x19444, 0x1944c, 0x1946c, 0x19474, 0x19474, 0x19490, 0x194cc, 0x194f0, 0x194f8, 0x19c00, 0x19c08, 0x19c10, 0x19c60, 0x19c94, 0x19ce4, 0x19cf0, 0x19d40, 0x19d50, 0x19d94, 0x19da0, 0x19de8, 0x19df0, 0x19e10, 0x19e50, 0x19e90, 0x19ea0, 0x19f24, 0x19f34, 0x19f34, 0x19f40, 0x19f50, 0x19f90, 0x19fb4, 0x19fc4, 0x19fe4, 0x1a000, 0x1a004, 0x1a010, 0x1a06c, 0x1a0b0, 0x1a0e4, 0x1a0ec, 0x1a0f8, 0x1a100, 0x1a108, 0x1a114, 0x1a120, 0x1a128, 0x1a130, 0x1a138, 0x1a138, 0x1a190, 0x1a1c4, 0x1a1fc, 0x1a1fc, 0x1e008, 0x1e00c, 0x1e040, 0x1e044, 0x1e04c, 0x1e04c, 0x1e284, 0x1e290, 0x1e2c0, 0x1e2c0, 0x1e2e0, 
0x1e2e0, 0x1e300, 0x1e384, 0x1e3c0, 0x1e3c8, 0x1e408, 0x1e40c, 0x1e440, 0x1e444, 0x1e44c, 0x1e44c, 0x1e684, 0x1e690, 0x1e6c0, 0x1e6c0, 0x1e6e0, 0x1e6e0, 0x1e700, 0x1e784, 0x1e7c0, 0x1e7c8, 0x1e808, 0x1e80c, 0x1e840, 0x1e844, 0x1e84c, 0x1e84c, 0x1ea84, 0x1ea90, 0x1eac0, 0x1eac0, 0x1eae0, 0x1eae0, 0x1eb00, 0x1eb84, 0x1ebc0, 0x1ebc8, 0x1ec08, 0x1ec0c, 0x1ec40, 0x1ec44, 0x1ec4c, 0x1ec4c, 0x1ee84, 0x1ee90, 0x1eec0, 0x1eec0, 0x1eee0, 0x1eee0, 0x1ef00, 0x1ef84, 0x1efc0, 0x1efc8, 0x1f008, 0x1f00c, 0x1f040, 0x1f044, 0x1f04c, 0x1f04c, 0x1f284, 0x1f290, 0x1f2c0, 0x1f2c0, 0x1f2e0, 0x1f2e0, 0x1f300, 0x1f384, 0x1f3c0, 0x1f3c8, 0x1f408, 0x1f40c, 0x1f440, 0x1f444, 0x1f44c, 0x1f44c, 0x1f684, 0x1f690, 0x1f6c0, 0x1f6c0, 0x1f6e0, 0x1f6e0, 0x1f700, 0x1f784, 0x1f7c0, 0x1f7c8, 0x1f808, 0x1f80c, 0x1f840, 0x1f844, 0x1f84c, 0x1f84c, 0x1fa84, 0x1fa90, 0x1fac0, 0x1fac0, 0x1fae0, 0x1fae0, 0x1fb00, 0x1fb84, 0x1fbc0, 0x1fbc8, 0x1fc08, 0x1fc0c, 0x1fc40, 0x1fc44, 0x1fc4c, 0x1fc4c, 0x1fe84, 0x1fe90, 0x1fec0, 0x1fec0, 0x1fee0, 0x1fee0, 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, 0x30100, 0x30144, 0x30190, 0x301a0, 0x301a8, 0x301b8, 0x301c4, 0x301c8, 0x301d0, 0x301d0, 0x30200, 0x30318, 0x30400, 0x304b4, 0x304c0, 0x3052c, 0x30540, 0x3061c, 0x30800, 0x30828, 0x30834, 0x30834, 0x308c0, 0x30908, 0x30910, 0x309ac, 0x30a00, 0x30a14, 0x30a1c, 0x30a2c, 0x30a44, 0x30a50, 0x30a74, 0x30a74, 0x30a7c, 0x30afc, 0x30b08, 0x30c24, 0x30d00, 0x30d00, 0x30d08, 0x30d14, 0x30d1c, 0x30d20, 0x30d3c, 0x30d3c, 0x30d48, 0x30d50, 0x31200, 0x3120c, 0x31220, 0x31220, 0x31240, 0x31240, 0x31600, 0x3160c, 0x31a00, 0x31a1c, 0x31e00, 0x31e20, 0x31e38, 0x31e3c, 0x31e80, 0x31e80, 0x31e88, 0x31ea8, 0x31eb0, 0x31eb4, 0x31ec8, 0x31ed4, 0x31fb8, 0x32004, 0x32200, 0x32200, 0x32208, 0x32240, 0x32248, 0x32280, 0x32288, 0x322c0, 0x322c8, 0x322fc, 0x32600, 0x32630, 0x32a00, 0x32abc, 0x32b00, 0x32b10, 0x32b20, 0x32b30, 0x32b40, 0x32b50, 0x32b60, 0x32b70, 0x33000, 0x33028, 0x33030, 0x33048, 0x33060, 0x33068, 0x33070, 0x3309c, 0x330f0, 0x33128, 0x33130, 0x33148, 0x33160, 0x33168, 0x33170, 0x3319c, 0x331f0, 0x33238, 0x33240, 0x33240, 0x33248, 0x33250, 0x3325c, 0x33264, 0x33270, 0x332b8, 0x332c0, 0x332e4, 0x332f8, 0x33338, 0x33340, 0x33340, 0x33348, 0x33350, 0x3335c, 0x33364, 0x33370, 0x333b8, 0x333c0, 0x333e4, 0x333f8, 0x33428, 0x33430, 0x33448, 0x33460, 0x33468, 0x33470, 0x3349c, 0x334f0, 0x33528, 0x33530, 0x33548, 0x33560, 0x33568, 0x33570, 0x3359c, 0x335f0, 0x33638, 0x33640, 0x33640, 0x33648, 0x33650, 0x3365c, 0x33664, 0x33670, 0x336b8, 0x336c0, 0x336e4, 0x336f8, 0x33738, 0x33740, 0x33740, 0x33748, 0x33750, 0x3375c, 0x33764, 0x33770, 0x337b8, 0x337c0, 0x337e4, 0x337f8, 0x337fc, 0x33814, 0x33814, 0x3382c, 0x3382c, 0x33880, 0x3388c, 0x338e8, 0x338ec, 0x33900, 0x33928, 0x33930, 0x33948, 0x33960, 0x33968, 0x33970, 0x3399c, 0x339f0, 0x33a38, 0x33a40, 0x33a40, 0x33a48, 0x33a50, 0x33a5c, 0x33a64, 0x33a70, 0x33ab8, 0x33ac0, 0x33ae4, 0x33af8, 0x33b10, 0x33b28, 0x33b28, 0x33b3c, 0x33b50, 0x33bf0, 0x33c10, 0x33c28, 0x33c28, 0x33c3c, 0x33c50, 0x33cf0, 0x33cfc, 0x34000, 0x34030, 0x34100, 0x34144, 0x34190, 0x341a0, 0x341a8, 0x341b8, 0x341c4, 0x341c8, 0x341d0, 0x341d0, 0x34200, 0x34318, 0x34400, 0x344b4, 0x344c0, 0x3452c, 0x34540, 0x3461c, 0x34800, 0x34828, 0x34834, 0x34834, 0x348c0, 0x34908, 0x34910, 0x349ac, 0x34a00, 0x34a14, 0x34a1c, 0x34a2c, 0x34a44, 0x34a50, 0x34a74, 0x34a74, 0x34a7c, 0x34afc, 0x34b08, 0x34c24, 0x34d00, 0x34d00, 0x34d08, 0x34d14, 0x34d1c, 0x34d20, 0x34d3c, 0x34d3c, 0x34d48, 0x34d50, 0x35200, 0x3520c, 0x35220, 0x35220, 0x35240, 0x35240, 0x35600, 0x3560c, 
0x35a00, 0x35a1c, 0x35e00, 0x35e20, 0x35e38, 0x35e3c, 0x35e80, 0x35e80, 0x35e88, 0x35ea8, 0x35eb0, 0x35eb4, 0x35ec8, 0x35ed4, 0x35fb8, 0x36004, 0x36200, 0x36200, 0x36208, 0x36240, 0x36248, 0x36280, 0x36288, 0x362c0, 0x362c8, 0x362fc, 0x36600, 0x36630, 0x36a00, 0x36abc, 0x36b00, 0x36b10, 0x36b20, 0x36b30, 0x36b40, 0x36b50, 0x36b60, 0x36b70, 0x37000, 0x37028, 0x37030, 0x37048, 0x37060, 0x37068, 0x37070, 0x3709c, 0x370f0, 0x37128, 0x37130, 0x37148, 0x37160, 0x37168, 0x37170, 0x3719c, 0x371f0, 0x37238, 0x37240, 0x37240, 0x37248, 0x37250, 0x3725c, 0x37264, 0x37270, 0x372b8, 0x372c0, 0x372e4, 0x372f8, 0x37338, 0x37340, 0x37340, 0x37348, 0x37350, 0x3735c, 0x37364, 0x37370, 0x373b8, 0x373c0, 0x373e4, 0x373f8, 0x37428, 0x37430, 0x37448, 0x37460, 0x37468, 0x37470, 0x3749c, 0x374f0, 0x37528, 0x37530, 0x37548, 0x37560, 0x37568, 0x37570, 0x3759c, 0x375f0, 0x37638, 0x37640, 0x37640, 0x37648, 0x37650, 0x3765c, 0x37664, 0x37670, 0x376b8, 0x376c0, 0x376e4, 0x376f8, 0x37738, 0x37740, 0x37740, 0x37748, 0x37750, 0x3775c, 0x37764, 0x37770, 0x377b8, 0x377c0, 0x377e4, 0x377f8, 0x377fc, 0x37814, 0x37814, 0x3782c, 0x3782c, 0x37880, 0x3788c, 0x378e8, 0x378ec, 0x37900, 0x37928, 0x37930, 0x37948, 0x37960, 0x37968, 0x37970, 0x3799c, 0x379f0, 0x37a38, 0x37a40, 0x37a40, 0x37a48, 0x37a50, 0x37a5c, 0x37a64, 0x37a70, 0x37ab8, 0x37ac0, 0x37ae4, 0x37af8, 0x37b10, 0x37b28, 0x37b28, 0x37b3c, 0x37b50, 0x37bf0, 0x37c10, 0x37c28, 0x37c28, 0x37c3c, 0x37c50, 0x37cf0, 0x37cfc, 0x38000, 0x38030, 0x38100, 0x38144, 0x38190, 0x381a0, 0x381a8, 0x381b8, 0x381c4, 0x381c8, 0x381d0, 0x381d0, 0x38200, 0x38318, 0x38400, 0x384b4, 0x384c0, 0x3852c, 0x38540, 0x3861c, 0x38800, 0x38828, 0x38834, 0x38834, 0x388c0, 0x38908, 0x38910, 0x389ac, 0x38a00, 0x38a14, 0x38a1c, 0x38a2c, 0x38a44, 0x38a50, 0x38a74, 0x38a74, 0x38a7c, 0x38afc, 0x38b08, 0x38c24, 0x38d00, 0x38d00, 0x38d08, 0x38d14, 0x38d1c, 0x38d20, 0x38d3c, 0x38d3c, 0x38d48, 0x38d50, 0x39200, 0x3920c, 0x39220, 0x39220, 0x39240, 0x39240, 0x39600, 0x3960c, 0x39a00, 0x39a1c, 0x39e00, 0x39e20, 0x39e38, 0x39e3c, 0x39e80, 0x39e80, 0x39e88, 0x39ea8, 0x39eb0, 0x39eb4, 0x39ec8, 0x39ed4, 0x39fb8, 0x3a004, 0x3a200, 0x3a200, 0x3a208, 0x3a240, 0x3a248, 0x3a280, 0x3a288, 0x3a2c0, 0x3a2c8, 0x3a2fc, 0x3a600, 0x3a630, 0x3aa00, 0x3aabc, 0x3ab00, 0x3ab10, 0x3ab20, 0x3ab30, 0x3ab40, 0x3ab50, 0x3ab60, 0x3ab70, 0x3b000, 0x3b028, 0x3b030, 0x3b048, 0x3b060, 0x3b068, 0x3b070, 0x3b09c, 0x3b0f0, 0x3b128, 0x3b130, 0x3b148, 0x3b160, 0x3b168, 0x3b170, 0x3b19c, 0x3b1f0, 0x3b238, 0x3b240, 0x3b240, 0x3b248, 0x3b250, 0x3b25c, 0x3b264, 0x3b270, 0x3b2b8, 0x3b2c0, 0x3b2e4, 0x3b2f8, 0x3b338, 0x3b340, 0x3b340, 0x3b348, 0x3b350, 0x3b35c, 0x3b364, 0x3b370, 0x3b3b8, 0x3b3c0, 0x3b3e4, 0x3b3f8, 0x3b428, 0x3b430, 0x3b448, 0x3b460, 0x3b468, 0x3b470, 0x3b49c, 0x3b4f0, 0x3b528, 0x3b530, 0x3b548, 0x3b560, 0x3b568, 0x3b570, 0x3b59c, 0x3b5f0, 0x3b638, 0x3b640, 0x3b640, 0x3b648, 0x3b650, 0x3b65c, 0x3b664, 0x3b670, 0x3b6b8, 0x3b6c0, 0x3b6e4, 0x3b6f8, 0x3b738, 0x3b740, 0x3b740, 0x3b748, 0x3b750, 0x3b75c, 0x3b764, 0x3b770, 0x3b7b8, 0x3b7c0, 0x3b7e4, 0x3b7f8, 0x3b7fc, 0x3b814, 0x3b814, 0x3b82c, 0x3b82c, 0x3b880, 0x3b88c, 0x3b8e8, 0x3b8ec, 0x3b900, 0x3b928, 0x3b930, 0x3b948, 0x3b960, 0x3b968, 0x3b970, 0x3b99c, 0x3b9f0, 0x3ba38, 0x3ba40, 0x3ba40, 0x3ba48, 0x3ba50, 0x3ba5c, 0x3ba64, 0x3ba70, 0x3bab8, 0x3bac0, 0x3bae4, 0x3baf8, 0x3bb10, 0x3bb28, 0x3bb28, 0x3bb3c, 0x3bb50, 0x3bbf0, 0x3bc10, 0x3bc28, 0x3bc28, 0x3bc3c, 0x3bc50, 0x3bcf0, 0x3bcfc, 0x3c000, 0x3c030, 0x3c100, 0x3c144, 0x3c190, 0x3c1a0, 0x3c1a8, 0x3c1b8, 0x3c1c4, 0x3c1c8, 0x3c1d0, 0x3c1d0, 0x3c200, 
0x3c318, 0x3c400, 0x3c4b4, 0x3c4c0, 0x3c52c, 0x3c540, 0x3c61c, 0x3c800, 0x3c828, 0x3c834, 0x3c834, 0x3c8c0, 0x3c908, 0x3c910, 0x3c9ac, 0x3ca00, 0x3ca14, 0x3ca1c, 0x3ca2c, 0x3ca44, 0x3ca50, 0x3ca74, 0x3ca74, 0x3ca7c, 0x3cafc, 0x3cb08, 0x3cc24, 0x3cd00, 0x3cd00, 0x3cd08, 0x3cd14, 0x3cd1c, 0x3cd20, 0x3cd3c, 0x3cd3c, 0x3cd48, 0x3cd50, 0x3d200, 0x3d20c, 0x3d220, 0x3d220, 0x3d240, 0x3d240, 0x3d600, 0x3d60c, 0x3da00, 0x3da1c, 0x3de00, 0x3de20, 0x3de38, 0x3de3c, 0x3de80, 0x3de80, 0x3de88, 0x3dea8, 0x3deb0, 0x3deb4, 0x3dec8, 0x3ded4, 0x3dfb8, 0x3e004, 0x3e200, 0x3e200, 0x3e208, 0x3e240, 0x3e248, 0x3e280, 0x3e288, 0x3e2c0, 0x3e2c8, 0x3e2fc, 0x3e600, 0x3e630, 0x3ea00, 0x3eabc, 0x3eb00, 0x3eb10, 0x3eb20, 0x3eb30, 0x3eb40, 0x3eb50, 0x3eb60, 0x3eb70, 0x3f000, 0x3f028, 0x3f030, 0x3f048, 0x3f060, 0x3f068, 0x3f070, 0x3f09c, 0x3f0f0, 0x3f128, 0x3f130, 0x3f148, 0x3f160, 0x3f168, 0x3f170, 0x3f19c, 0x3f1f0, 0x3f238, 0x3f240, 0x3f240, 0x3f248, 0x3f250, 0x3f25c, 0x3f264, 0x3f270, 0x3f2b8, 0x3f2c0, 0x3f2e4, 0x3f2f8, 0x3f338, 0x3f340, 0x3f340, 0x3f348, 0x3f350, 0x3f35c, 0x3f364, 0x3f370, 0x3f3b8, 0x3f3c0, 0x3f3e4, 0x3f3f8, 0x3f428, 0x3f430, 0x3f448, 0x3f460, 0x3f468, 0x3f470, 0x3f49c, 0x3f4f0, 0x3f528, 0x3f530, 0x3f548, 0x3f560, 0x3f568, 0x3f570, 0x3f59c, 0x3f5f0, 0x3f638, 0x3f640, 0x3f640, 0x3f648, 0x3f650, 0x3f65c, 0x3f664, 0x3f670, 0x3f6b8, 0x3f6c0, 0x3f6e4, 0x3f6f8, 0x3f738, 0x3f740, 0x3f740, 0x3f748, 0x3f750, 0x3f75c, 0x3f764, 0x3f770, 0x3f7b8, 0x3f7c0, 0x3f7e4, 0x3f7f8, 0x3f7fc, 0x3f814, 0x3f814, 0x3f82c, 0x3f82c, 0x3f880, 0x3f88c, 0x3f8e8, 0x3f8ec, 0x3f900, 0x3f928, 0x3f930, 0x3f948, 0x3f960, 0x3f968, 0x3f970, 0x3f99c, 0x3f9f0, 0x3fa38, 0x3fa40, 0x3fa40, 0x3fa48, 0x3fa50, 0x3fa5c, 0x3fa64, 0x3fa70, 0x3fab8, 0x3fac0, 0x3fae4, 0x3faf8, 0x3fb10, 0x3fb28, 0x3fb28, 0x3fb3c, 0x3fb50, 0x3fbf0, 0x3fc10, 0x3fc28, 0x3fc28, 0x3fc3c, 0x3fc50, 0x3fcf0, 0x3fcfc, 0x40000, 0x4000c, 0x40040, 0x40050, 0x40060, 0x40068, 0x4007c, 0x4008c, 0x40094, 0x400b0, 0x400c0, 0x40144, 0x40180, 0x4018c, 0x40200, 0x40254, 0x40260, 0x40264, 0x40270, 0x40288, 0x40290, 0x40298, 0x402ac, 0x402c8, 0x402d0, 0x402e0, 0x402f0, 0x402f0, 0x40300, 0x4033c, 0x403f8, 0x403fc, 0x41304, 0x413c4, 0x41400, 0x4140c, 0x41414, 0x4141c, 0x41480, 0x414d0, 0x44000, 0x44054, 0x4405c, 0x44078, 0x440c0, 0x44174, 0x44180, 0x441ac, 0x441b4, 0x441b8, 0x441c0, 0x44254, 0x4425c, 0x44278, 0x442c0, 0x44374, 0x44380, 0x443ac, 0x443b4, 0x443b8, 0x443c0, 0x44454, 0x4445c, 0x44478, 0x444c0, 0x44574, 0x44580, 0x445ac, 0x445b4, 0x445b8, 0x445c0, 0x44654, 0x4465c, 0x44678, 0x446c0, 0x44774, 0x44780, 0x447ac, 0x447b4, 0x447b8, 0x447c0, 0x44854, 0x4485c, 0x44878, 0x448c0, 0x44974, 0x44980, 0x449ac, 0x449b4, 0x449b8, 0x449c0, 0x449fc, 0x45000, 0x45004, 0x45010, 0x45030, 0x45040, 0x45060, 0x45068, 0x45068, 0x45080, 0x45084, 0x450a0, 0x450b0, 0x45200, 0x45204, 0x45210, 0x45230, 0x45240, 0x45260, 0x45268, 0x45268, 0x45280, 0x45284, 0x452a0, 0x452b0, 0x460c0, 0x460e4, 0x47000, 0x4703c, 0x47044, 0x4708c, 0x47200, 0x47250, 0x47400, 0x47408, 0x47414, 0x47420, 0x47600, 0x47618, 0x47800, 0x47814, 0x48000, 0x4800c, 0x48040, 0x48050, 0x48060, 0x48068, 0x4807c, 0x4808c, 0x48094, 0x480b0, 0x480c0, 0x48144, 0x48180, 0x4818c, 0x48200, 0x48254, 0x48260, 0x48264, 0x48270, 0x48288, 0x48290, 0x48298, 0x482ac, 0x482c8, 0x482d0, 0x482e0, 0x482f0, 0x482f0, 0x48300, 0x4833c, 0x483f8, 0x483fc, 0x49304, 0x493c4, 0x49400, 0x4940c, 0x49414, 0x4941c, 0x49480, 0x494d0, 0x4c000, 0x4c054, 0x4c05c, 0x4c078, 0x4c0c0, 0x4c174, 0x4c180, 0x4c1ac, 0x4c1b4, 0x4c1b8, 0x4c1c0, 0x4c254, 0x4c25c, 0x4c278, 0x4c2c0, 0x4c374, 
0x4c380, 0x4c3ac, 0x4c3b4, 0x4c3b8, 0x4c3c0, 0x4c454, 0x4c45c, 0x4c478, 0x4c4c0, 0x4c574, 0x4c580, 0x4c5ac, 0x4c5b4, 0x4c5b8, 0x4c5c0, 0x4c654, 0x4c65c, 0x4c678, 0x4c6c0, 0x4c774, 0x4c780, 0x4c7ac, 0x4c7b4, 0x4c7b8, 0x4c7c0, 0x4c854, 0x4c85c, 0x4c878, 0x4c8c0, 0x4c974, 0x4c980, 0x4c9ac, 0x4c9b4, 0x4c9b8, 0x4c9c0, 0x4c9fc, 0x4d000, 0x4d004, 0x4d010, 0x4d030, 0x4d040, 0x4d060, 0x4d068, 0x4d068, 0x4d080, 0x4d084, 0x4d0a0, 0x4d0b0, 0x4d200, 0x4d204, 0x4d210, 0x4d230, 0x4d240, 0x4d260, 0x4d268, 0x4d268, 0x4d280, 0x4d284, 0x4d2a0, 0x4d2b0, 0x4e0c0, 0x4e0e4, 0x4f000, 0x4f03c, 0x4f044, 0x4f08c, 0x4f200, 0x4f250, 0x4f400, 0x4f408, 0x4f414, 0x4f420, 0x4f600, 0x4f618, 0x4f800, 0x4f814, 0x50000, 0x50084, 0x50090, 0x500cc, 0x50400, 0x50400, 0x50800, 0x50884, 0x50890, 0x508cc, 0x50c00, 0x50c00, 0x51000, 0x5101c, 0x51300, 0x51308, }; static const unsigned int t5vf_reg_ranges[] = { VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), VF_MPS_REG(A_MPS_VF_CTL), VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), FW_T4VF_MBDATA_BASE_ADDR, FW_T4VF_MBDATA_BASE_ADDR + ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), }; static const unsigned int t6_reg_ranges[] = { 0x1008, 0x101c, 0x1024, 0x10a8, 0x10b4, 0x10f8, 0x1100, 0x1114, 0x111c, 0x112c, 0x1138, 0x113c, 0x1144, 0x114c, 0x1180, 0x1184, 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, 0x11fc, 0x1274, 0x1280, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, 0x3060, 0x30b0, 0x30b8, 0x30d8, 0x30e0, 0x30fc, 0x3140, 0x357c, 0x35a8, 0x35cc, 0x35ec, 0x35ec, 0x3600, 0x5624, 0x56cc, 0x56ec, 0x56f4, 0x5720, 0x5728, 0x575c, 0x580c, 0x5814, 0x5890, 0x589c, 0x58a4, 0x58ac, 0x58b8, 0x58bc, 0x5940, 0x595c, 0x5980, 0x598c, 0x59b0, 0x59c8, 0x59d0, 0x59dc, 0x59fc, 0x5a18, 0x5a60, 0x5a6c, 0x5a80, 0x5a8c, 0x5a94, 0x5a9c, 0x5b94, 0x5bfc, 0x5c10, 0x5e48, 0x5e50, 0x5e94, 0x5ea0, 0x5eb0, 0x5ec0, 0x5ec0, 0x5ec8, 0x5ed0, 0x5ee0, 0x5ee0, 0x5ef0, 0x5ef0, 0x5f00, 0x5f00, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, 0x60a8, 0x619c, 0x7700, 0x7798, 0x77c0, 0x7880, 0x78cc, 0x78fc, 0x7b00, 0x7b58, 0x7b60, 0x7b84, 0x7b8c, 0x7c54, 0x7d00, 0x7d38, 0x7d40, 0x7d84, 0x7d8c, 0x7ddc, 0x7de4, 0x7e04, 0x7e10, 0x7e1c, 0x7e24, 0x7e38, 0x7e40, 0x7e44, 0x7e4c, 0x7e78, 0x7e80, 0x7edc, 0x7ee8, 0x7efc, 0x8dc0, 0x8de4, 0x8df8, 0x8e04, 0x8e10, 0x8e84, 0x8ea0, 0x8f88, 0x8fb8, 0x9058, 0x9060, 0x9060, 0x9068, 0x90f8, 0x9100, 0x9124, 0x9400, 0x9470, 0x9600, 0x9600, 0x9608, 0x9638, 0x9640, 0x9704, 0x9710, 0x971c, 0x9800, 0x9808, 0x9820, 0x983c, 0x9850, 0x9864, 0x9c00, 0x9c6c, 0x9c80, 0x9cec, 0x9d00, 0x9d6c, 0x9d80, 0x9dec, 0x9e00, 0x9e6c, 0x9e80, 0x9eec, 0x9f00, 0x9f6c, 0x9f80, 0xa020, 0xd004, 0xd03c, 0xd100, 0xd118, 0xd200, 0xd214, 0xd220, 0xd234, 0xd240, 0xd254, 0xd260, 0xd274, 0xd280, 0xd294, 0xd2a0, 0xd2b4, 0xd2c0, 0xd2d4, 0xd2e0, 0xd2f4, 0xd300, 0xd31c, 0xdfc0, 0xdfe0, 0xe000, 0xf008, 0xf010, 0xf018, 0xf020, 0xf028, 0x11000, 0x11014, 0x11048, 0x1106c, 0x11074, 0x11088, 0x11098, 0x11120, 0x1112c, 0x1117c, 0x11190, 0x112e0, 0x11300, 0x1130c, 0x12000, 0x1206c, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e8, 0x190f0, 0x190f8, 0x19100, 0x19110, 0x19120, 0x19124, 0x19150, 0x19194, 0x1919c, 0x191b0, 0x191d0, 0x191e8, 0x19238, 0x19290, 0x192a4, 0x192b0, 0x192bc, 0x192bc, 0x19348, 0x1934c, 0x193f8, 0x19418, 0x19420, 0x19428, 0x19430, 0x19444, 0x1944c, 0x1946c, 0x19474, 0x19474, 0x19490, 0x194cc, 0x194f0, 0x194f8, 0x19c00, 0x19c48, 0x19c50, 0x19c80, 0x19c94, 0x19c98, 0x19ca0, 0x19cbc, 
0x19ce4, 0x19ce4, 0x19cf0, 0x19cf8, 0x19d00, 0x19d28, 0x19d50, 0x19d78, 0x19d94, 0x19d98, 0x19da0, 0x19dc8, 0x19df0, 0x19e10, 0x19e50, 0x19e6c, 0x19ea0, 0x19ebc, 0x19ec4, 0x19ef4, 0x19f04, 0x19f2c, 0x19f34, 0x19f34, 0x19f40, 0x19f50, 0x19f90, 0x19fac, 0x19fc4, 0x19fc8, 0x19fd0, 0x19fe4, 0x1a000, 0x1a004, 0x1a010, 0x1a06c, 0x1a0b0, 0x1a0e4, 0x1a0ec, 0x1a0f8, 0x1a100, 0x1a108, 0x1a114, 0x1a120, 0x1a128, 0x1a130, 0x1a138, 0x1a138, 0x1a190, 0x1a1c4, 0x1a1fc, 0x1a1fc, 0x1e008, 0x1e00c, 0x1e040, 0x1e044, 0x1e04c, 0x1e04c, 0x1e284, 0x1e290, 0x1e2c0, 0x1e2c0, 0x1e2e0, 0x1e2e0, 0x1e300, 0x1e384, 0x1e3c0, 0x1e3c8, 0x1e408, 0x1e40c, 0x1e440, 0x1e444, 0x1e44c, 0x1e44c, 0x1e684, 0x1e690, 0x1e6c0, 0x1e6c0, 0x1e6e0, 0x1e6e0, 0x1e700, 0x1e784, 0x1e7c0, 0x1e7c8, 0x1e808, 0x1e80c, 0x1e840, 0x1e844, 0x1e84c, 0x1e84c, 0x1ea84, 0x1ea90, 0x1eac0, 0x1eac0, 0x1eae0, 0x1eae0, 0x1eb00, 0x1eb84, 0x1ebc0, 0x1ebc8, 0x1ec08, 0x1ec0c, 0x1ec40, 0x1ec44, 0x1ec4c, 0x1ec4c, 0x1ee84, 0x1ee90, 0x1eec0, 0x1eec0, 0x1eee0, 0x1eee0, 0x1ef00, 0x1ef84, 0x1efc0, 0x1efc8, 0x1f008, 0x1f00c, 0x1f040, 0x1f044, 0x1f04c, 0x1f04c, 0x1f284, 0x1f290, 0x1f2c0, 0x1f2c0, 0x1f2e0, 0x1f2e0, 0x1f300, 0x1f384, 0x1f3c0, 0x1f3c8, 0x1f408, 0x1f40c, 0x1f440, 0x1f444, 0x1f44c, 0x1f44c, 0x1f684, 0x1f690, 0x1f6c0, 0x1f6c0, 0x1f6e0, 0x1f6e0, 0x1f700, 0x1f784, 0x1f7c0, 0x1f7c8, 0x1f808, 0x1f80c, 0x1f840, 0x1f844, 0x1f84c, 0x1f84c, 0x1fa84, 0x1fa90, 0x1fac0, 0x1fac0, 0x1fae0, 0x1fae0, 0x1fb00, 0x1fb84, 0x1fbc0, 0x1fbc8, 0x1fc08, 0x1fc0c, 0x1fc40, 0x1fc44, 0x1fc4c, 0x1fc4c, 0x1fe84, 0x1fe90, 0x1fec0, 0x1fec0, 0x1fee0, 0x1fee0, 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, 0x30100, 0x30168, 0x30190, 0x301a0, 0x301a8, 0x301b8, 0x301c4, 0x301c8, 0x301d0, 0x301d0, 0x30200, 0x30320, 0x30400, 0x304b4, 0x304c0, 0x3052c, 0x30540, 0x3061c, 0x30800, 0x308a0, 0x308c0, 0x30908, 0x30910, 0x309b8, 0x30a00, 0x30a04, 0x30a0c, 0x30a14, 0x30a1c, 0x30a2c, 0x30a44, 0x30a50, 0x30a74, 0x30a74, 0x30a7c, 0x30afc, 0x30b08, 0x30c24, 0x30d00, 0x30d14, 0x30d1c, 0x30d3c, 0x30d44, 0x30d4c, 0x30d54, 0x30d74, 0x30d7c, 0x30d7c, 0x30de0, 0x30de0, 0x30e00, 0x30ed4, 0x30f00, 0x30fa4, 0x30fc0, 0x30fc4, 0x31000, 0x31004, 0x31080, 0x310fc, 0x31208, 0x31220, 0x3123c, 0x31254, 0x31300, 0x31300, 0x31308, 0x3131c, 0x31338, 0x3133c, 0x31380, 0x31380, 0x31388, 0x313a8, 0x313b4, 0x313b4, 0x31400, 0x31420, 0x31438, 0x3143c, 0x31480, 0x31480, 0x314a8, 0x314a8, 0x314b0, 0x314b4, 0x314c8, 0x314d4, 0x31a40, 0x31a4c, 0x31af0, 0x31b20, 0x31b38, 0x31b3c, 0x31b80, 0x31b80, 0x31ba8, 0x31ba8, 0x31bb0, 0x31bb4, 0x31bc8, 0x31bd4, 0x32140, 0x3218c, 0x321f0, 0x321f4, 0x32200, 0x32200, 0x32218, 0x32218, 0x32400, 0x32400, 0x32408, 0x3241c, 0x32618, 0x32620, 0x32664, 0x32664, 0x326a8, 0x326a8, 0x326ec, 0x326ec, 0x32a00, 0x32abc, 0x32b00, 0x32b18, 0x32b20, 0x32b38, 0x32b40, 0x32b58, 0x32b60, 0x32b78, 0x32c00, 0x32c00, 0x32c08, 0x32c3c, 0x33000, 0x3302c, 0x33034, 0x33050, 0x33058, 0x33058, 0x33060, 0x3308c, 0x3309c, 0x330ac, 0x330c0, 0x330c0, 0x330c8, 0x330d0, 0x330d8, 0x330e0, 0x330ec, 0x3312c, 0x33134, 0x33150, 0x33158, 0x33158, 0x33160, 0x3318c, 0x3319c, 0x331ac, 0x331c0, 0x331c0, 0x331c8, 0x331d0, 0x331d8, 0x331e0, 0x331ec, 0x33290, 0x33298, 0x332c4, 0x332e4, 0x33390, 0x33398, 0x333c4, 0x333e4, 0x3342c, 0x33434, 0x33450, 0x33458, 0x33458, 0x33460, 0x3348c, 0x3349c, 0x334ac, 0x334c0, 0x334c0, 0x334c8, 0x334d0, 0x334d8, 0x334e0, 0x334ec, 0x3352c, 0x33534, 0x33550, 0x33558, 0x33558, 0x33560, 0x3358c, 0x3359c, 0x335ac, 0x335c0, 0x335c0, 0x335c8, 0x335d0, 0x335d8, 0x335e0, 0x335ec, 0x33690, 0x33698, 0x336c4, 0x336e4, 
0x33790, 0x33798, 0x337c4, 0x337e4, 0x337fc, 0x33814, 0x33814, 0x33854, 0x33868, 0x33880, 0x3388c, 0x338c0, 0x338d0, 0x338e8, 0x338ec, 0x33900, 0x3392c, 0x33934, 0x33950, 0x33958, 0x33958, 0x33960, 0x3398c, 0x3399c, 0x339ac, 0x339c0, 0x339c0, 0x339c8, 0x339d0, 0x339d8, 0x339e0, 0x339ec, 0x33a90, 0x33a98, 0x33ac4, 0x33ae4, 0x33b10, 0x33b24, 0x33b28, 0x33b38, 0x33b50, 0x33bf0, 0x33c10, 0x33c24, 0x33c28, 0x33c38, 0x33c50, 0x33cf0, 0x33cfc, 0x34000, 0x34030, 0x34100, 0x34168, 0x34190, 0x341a0, 0x341a8, 0x341b8, 0x341c4, 0x341c8, 0x341d0, 0x341d0, 0x34200, 0x34320, 0x34400, 0x344b4, 0x344c0, 0x3452c, 0x34540, 0x3461c, 0x34800, 0x348a0, 0x348c0, 0x34908, 0x34910, 0x349b8, 0x34a00, 0x34a04, 0x34a0c, 0x34a14, 0x34a1c, 0x34a2c, 0x34a44, 0x34a50, 0x34a74, 0x34a74, 0x34a7c, 0x34afc, 0x34b08, 0x34c24, 0x34d00, 0x34d14, 0x34d1c, 0x34d3c, 0x34d44, 0x34d4c, 0x34d54, 0x34d74, 0x34d7c, 0x34d7c, 0x34de0, 0x34de0, 0x34e00, 0x34ed4, 0x34f00, 0x34fa4, 0x34fc0, 0x34fc4, 0x35000, 0x35004, 0x35080, 0x350fc, 0x35208, 0x35220, 0x3523c, 0x35254, 0x35300, 0x35300, 0x35308, 0x3531c, 0x35338, 0x3533c, 0x35380, 0x35380, 0x35388, 0x353a8, 0x353b4, 0x353b4, 0x35400, 0x35420, 0x35438, 0x3543c, 0x35480, 0x35480, 0x354a8, 0x354a8, 0x354b0, 0x354b4, 0x354c8, 0x354d4, 0x35a40, 0x35a4c, 0x35af0, 0x35b20, 0x35b38, 0x35b3c, 0x35b80, 0x35b80, 0x35ba8, 0x35ba8, 0x35bb0, 0x35bb4, 0x35bc8, 0x35bd4, 0x36140, 0x3618c, 0x361f0, 0x361f4, 0x36200, 0x36200, 0x36218, 0x36218, 0x36400, 0x36400, 0x36408, 0x3641c, 0x36618, 0x36620, 0x36664, 0x36664, 0x366a8, 0x366a8, 0x366ec, 0x366ec, 0x36a00, 0x36abc, 0x36b00, 0x36b18, 0x36b20, 0x36b38, 0x36b40, 0x36b58, 0x36b60, 0x36b78, 0x36c00, 0x36c00, 0x36c08, 0x36c3c, 0x37000, 0x3702c, 0x37034, 0x37050, 0x37058, 0x37058, 0x37060, 0x3708c, 0x3709c, 0x370ac, 0x370c0, 0x370c0, 0x370c8, 0x370d0, 0x370d8, 0x370e0, 0x370ec, 0x3712c, 0x37134, 0x37150, 0x37158, 0x37158, 0x37160, 0x3718c, 0x3719c, 0x371ac, 0x371c0, 0x371c0, 0x371c8, 0x371d0, 0x371d8, 0x371e0, 0x371ec, 0x37290, 0x37298, 0x372c4, 0x372e4, 0x37390, 0x37398, 0x373c4, 0x373e4, 0x3742c, 0x37434, 0x37450, 0x37458, 0x37458, 0x37460, 0x3748c, 0x3749c, 0x374ac, 0x374c0, 0x374c0, 0x374c8, 0x374d0, 0x374d8, 0x374e0, 0x374ec, 0x3752c, 0x37534, 0x37550, 0x37558, 0x37558, 0x37560, 0x3758c, 0x3759c, 0x375ac, 0x375c0, 0x375c0, 0x375c8, 0x375d0, 0x375d8, 0x375e0, 0x375ec, 0x37690, 0x37698, 0x376c4, 0x376e4, 0x37790, 0x37798, 0x377c4, 0x377e4, 0x377fc, 0x37814, 0x37814, 0x37854, 0x37868, 0x37880, 0x3788c, 0x378c0, 0x378d0, 0x378e8, 0x378ec, 0x37900, 0x3792c, 0x37934, 0x37950, 0x37958, 0x37958, 0x37960, 0x3798c, 0x3799c, 0x379ac, 0x379c0, 0x379c0, 0x379c8, 0x379d0, 0x379d8, 0x379e0, 0x379ec, 0x37a90, 0x37a98, 0x37ac4, 0x37ae4, 0x37b10, 0x37b24, 0x37b28, 0x37b38, 0x37b50, 0x37bf0, 0x37c10, 0x37c24, 0x37c28, 0x37c38, 0x37c50, 0x37cf0, 0x37cfc, 0x40040, 0x40040, 0x40080, 0x40084, 0x40100, 0x40100, 0x40140, 0x401bc, 0x40200, 0x40214, 0x40228, 0x40228, 0x40240, 0x40258, 0x40280, 0x40280, 0x40304, 0x40304, 0x40330, 0x4033c, 0x41304, 0x413c8, 0x413d0, 0x413dc, 0x413f0, 0x413f0, 0x41400, 0x4140c, 0x41414, 0x4141c, 0x41480, 0x414d0, 0x44000, 0x4407c, 0x440c0, 0x441ac, 0x441b4, 0x4427c, 0x442c0, 0x443ac, 0x443b4, 0x4447c, 0x444c0, 0x445ac, 0x445b4, 0x4467c, 0x446c0, 0x447ac, 0x447b4, 0x4487c, 0x448c0, 0x449ac, 0x449b4, 0x44a7c, 0x44ac0, 0x44bac, 0x44bb4, 0x44c7c, 0x44cc0, 0x44dac, 0x44db4, 0x44e7c, 0x44ec0, 0x44fac, 0x44fb4, 0x4507c, 0x450c0, 0x451ac, 0x451b4, 0x451fc, 0x45800, 0x45804, 0x45810, 0x45830, 0x45840, 0x45860, 0x45868, 0x45868, 0x45880, 0x45884, 0x458a0, 0x458b0, 
	0x45a00, 0x45a04,
	0x45a10, 0x45a30,
	0x45a40, 0x45a60,
	0x45a68, 0x45a68,
	0x45a80, 0x45a84,
	0x45aa0, 0x45ab0,
	0x460c0, 0x460e4,
	0x47000, 0x4703c,
	0x47044, 0x4708c,
	0x47200, 0x47250,
	0x47400, 0x47408,
	0x47414, 0x47420,
	0x47600, 0x47618,
	0x47800, 0x47814,
	0x47820, 0x4782c,
	0x50000, 0x50084,
	0x50090, 0x500cc,
	0x50300, 0x50384,
	0x50400, 0x50400,
	0x50800, 0x50884,
	0x50890, 0x508cc,
	0x50b00, 0x50b84,
	0x50c00, 0x50c00,
	0x51000, 0x51020,
	0x51028, 0x510b0,
	0x51300, 0x51324,
};

static const unsigned int t6vf_reg_ranges[] = {
	VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
	VF_MPS_REG(A_MPS_VF_CTL),
	VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
	VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
	FW_T6VF_MBDATA_BASE_ADDR,
	FW_T6VF_MBDATA_BASE_ADDR +
	((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};

	u32 *buf_end = (u32 *)(buf + buf_size);
	const unsigned int *reg_ranges;
	int reg_ranges_size, range;
	unsigned int chip_version = chip_id(adap);

	/*
	 * Select the right set of register ranges to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		if (adap->flags & IS_VF) {
			reg_ranges = t4vf_reg_ranges;
			reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
		} else {
			reg_ranges = t4_reg_ranges;
			reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
		}
		break;

	case CHELSIO_T5:
		if (adap->flags & IS_VF) {
			reg_ranges = t5vf_reg_ranges;
			reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
		} else {
			reg_ranges = t5_reg_ranges;
			reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
		}
		break;

	case CHELSIO_T6:
		if (adap->flags & IS_VF) {
			reg_ranges = t6vf_reg_ranges;
			reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
		} else {
			reg_ranges = t6_reg_ranges;
			reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
		}
		break;

	default:
		CH_ERR(adap, "Unsupported chip version %d\n", chip_version);
		return;
	}

	/*
	 * Clear the register buffer and insert the appropriate register
	 * values selected by the above register ranges.
	 */
	memset(buf, 0, buf_size);
	for (range = 0; range < reg_ranges_size; range += 2) {
		unsigned int reg = reg_ranges[range];
		unsigned int last_reg = reg_ranges[range + 1];
		u32 *bufp = (u32 *)(buf + reg);

		/*
		 * Iterate across the register range filling in the register
		 * buffer but don't write past the end of the register buffer.
		 */
		while (reg <= last_reg && bufp < buf_end) {
			*bufp++ = t4_read_reg(adap, reg);
			reg += sizeof(u32);
		}
	}
}

/*
 * Partial EEPROM Vital Product Data structure.  The VPD starts with one ID
 * header followed by one or more VPD-R sections, each with its own header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
};

struct t4_vpdr_hdr {
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_DELAY		10		/* 10us per poll spin */
#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */

#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_SIZE		0x800
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID	0x82
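/*
 * Illustrative sketch (not part of the driver; the helper name is invented):
 * the *_reg_ranges tables above are flat arrays of (first, last) pairs of
 * register addresses, both ends inclusive, that the dump loop walks 4 bytes
 * at a time.  A pair therefore contributes (last - first) / 4 + 1 registers:
 */
#if 0
static unsigned int
count_dumped_regs(const unsigned int *ranges, int nranges)
{
	unsigned int n = 0;
	int i;

	/* nranges is the ARRAY_SIZE of the table; entries come in pairs. */
	for (i = 0; i < nranges; i += 2)
		n += (ranges[i + 1] - ranges[i]) / 4 + 1;

	return (n);	/* e.g. a (0x50400, 0x50400) pair counts as 1 */
}
#endif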
/*
 * Small utility function to wait till any outstanding VPD Access is complete.
 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
 * VPD Access in flight.  This allows us to handle the problem of having a
 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
 */
static int t4_seeprom_wait(struct adapter *adapter)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int max_poll;

	/*
	 * If no VPD Access is in flight, we can just return success right
	 * away.
	 */
	if (!adapter->vpd_busy)
		return 0;

	/*
	 * Poll the VPD Capability Address/Flag register waiting for it
	 * to indicate that the operation is complete.
	 */
	max_poll = EEPROM_MAX_POLL;
	do {
		u16 val;

		udelay(EEPROM_DELAY);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);

		/*
		 * If the operation is complete, mark the VPD as no longer
		 * busy and return success.
		 */
		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
			adapter->vpd_busy = 0;
			return 0;
		}
	} while (--max_poll);

	/*
	 * Failure!  Note that we leave the VPD Busy status set in order to
	 * avoid pushing a new VPD Access request into the VPD Capability till
	 * the current operation eventually succeeds.  It's a bug to issue a
	 * new request when an existing request is in flight and will result
	 * in corrupt hardware state.
	 */
	return -ETIMEDOUT;
}

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Grab the returned data, swizzle it into our endianness and
	 * return success.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
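/*
 * Note on the flag polarity used with t4_seeprom_wait(): the PCI VPD flag
 * bit (PCI_VPD_ADDR_F) is set by the device when a read completes and
 * cleared when a write completes.  Reads therefore arm vpd_flag with
 * PCI_VPD_ADDR_F and writes arm it with 0, which lets the same wait routine
 * serve both directions.
 */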
/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg;
	int max_poll;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Write request, mark the VPD as being busy and
	 * wait for our request to complete.  If it doesn't complete, note
	 * the error and return it to our caller.  Note that we do not reset
	 * the VPD Busy status!
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Reset the PCI_VPD_DATA register after a transaction and wait for
	 * our request to complete.  If it doesn't complete, return an error.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - locates an information field keyword in the VPD
 *	@vpd: pointer to the buffered VPD data structure
 *	@kw: the keyword to search for
 *	@region: VPD region to search (starting from 0)
 *
 *	Returns the offset of the information field keyword's value within
 *	@vpd, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
{
	int i, tag;
	unsigned int offset, len;
	const struct t4_vpdr_hdr *vpdr;

	offset = sizeof(struct t4_vpd_hdr);
	vpdr = (const void *)(vpd + offset);
	tag = vpdr->vpdr_tag;
	len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
	while (region--) {
		offset += sizeof(struct t4_vpdr_hdr) + len;
		vpdr = (const void *)(vpd + offset);
		if (++tag != vpdr->vpdr_tag)
			return -ENOENT;
		len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
	}
	offset += sizeof(struct t4_vpdr_hdr);

	if (offset + len > VPD_LEN) {
		return -ENOENT;
	}

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(vpd + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + vpd[i + 2];
	}

	return -ENOENT;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@vpd: caller provided temporary space to read the VPD into
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
    uint16_t device_id, u32 *buf)
{
	int i, ret, addr;
	int ec, sn, pn, na, md;
	u8 csum;
	const u8 *vpd = (const u8 *)buf;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
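	 * The probe below reads one word at VPD_BASE and checks its first
	 * byte for the PCI "ID string" tag (CHELSIO_VPD_UNIQUE_ID, 0x82); if
	 * the tag is absent the card is assumed to use the legacy layout at
	 * VPD_BASE_OLD.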
*/ ret = t4_seeprom_read(adapter, VPD_BASE, buf); if (ret) return (ret); /* * The VPD shall have a unique identifier specified by the PCI SIG. * For chelsio adapters, the identifier is 0x82. The first byte of a VPD * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software * is expected to automatically put this entry at the * beginning of the VPD. */ addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; for (i = 0; i < VPD_LEN; i += 4) { ret = t4_seeprom_read(adapter, addr + i, buf++); if (ret) return ret; } #define FIND_VPD_KW(var,name) do { \ var = get_vpd_keyword_val(vpd, name, 0); \ if (var < 0) { \ CH_ERR(adapter, "missing VPD keyword " name "\n"); \ return -EINVAL; \ } \ } while (0) FIND_VPD_KW(i, "RV"); for (csum = 0; i >= 0; i--) csum += vpd[i]; if (csum) { CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum); return -EINVAL; } FIND_VPD_KW(ec, "EC"); FIND_VPD_KW(sn, "SN"); FIND_VPD_KW(pn, "PN"); FIND_VPD_KW(na, "NA"); #undef FIND_VPD_KW memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN); strstrip(p->id); memcpy(p->ec, vpd + ec, EC_LEN); strstrip(p->ec); i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); strstrip(p->sn); i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2]; memcpy(p->pn, vpd + pn, min(i, PN_LEN)); strstrip((char *)p->pn); i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2]; memcpy(p->na, vpd + na, min(i, MACADDR_LEN)); strstrip((char *)p->na); if (device_id & 0x80) return 0; /* Custom card */ md = get_vpd_keyword_val(vpd, "VF", 1); if (md < 0) { snprintf(p->md, sizeof(p->md), "unknown"); } else { i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2]; memcpy(p->md, vpd + md, min(i, MD_LEN)); strstrip((char *)p->md); } return 0; } /* serial flash and firmware constants and flash config file constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ /* flash command opcodes */ SF_PROG_PAGE = 2, /* program 256B page */ SF_WR_DISABLE = 4, /* disable writes */ SF_RD_STATUS = 5, /* read status register */ SF_WR_ENABLE = 6, /* enable writes */ SF_RD_DATA_FAST = 0xb, /* read flash */ SF_RD_ID = 0x9f, /* read ID */ SF_ERASE_SECTOR = 0xd8, /* erase 64KB sector */ }; /** * sf1_read - read data from the serial flash * @adapter: the adapter * @byte_cnt: number of bytes to read * @cont: whether another operation will be chained * @lock: whether to lock SF for PL access only * @valp: where to store the read data * * Reads up to 4 bytes of data from the serial flash. The location of * the read needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. */ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock, u32 *valp) { int ret; if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) return -EBUSY; t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); if (!ret) *valp = t4_read_reg(adapter, A_SF_DATA); return ret; } /** * sf1_write - write data to the serial flash * @adapter: the adapter * @byte_cnt: number of bytes to write * @cont: whether another operation will be chained * @lock: whether to lock SF for PL access only * @val: value to write * * Writes up to 4 bytes of data to the serial flash. The location of * the write needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. 
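 *
 *	For illustration (a sketch, not code from this file; the flag choices
 *	are assumptions): a chained flash ID read could be
 *	sf1_write(adap, 1, 1, 1, SF_RD_ID) to issue the opcode with the chip
 *	still selected, then sf1_read(adap, 3, 0, 1, &id) to pull three ID
 *	bytes and end the chain, then a write of 0 to A_SF_OP to release the
 *	flash lock.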
*/ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock, u32 val) { if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) return -EBUSY; t4_write_reg(adapter, A_SF_DATA, val); t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); } /** * flash_wait_op - wait for a flash operation to complete * @adapter: the adapter * @attempts: max number of polls of the status register * @delay: delay between polls in ms * * Wait for a flash operation to complete by polling the status register. */ static int flash_wait_op(struct adapter *adapter, int attempts, int delay) { int ret; u32 status; while (1) { if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) return ret; if (!(status & 1)) return 0; if (--attempts == 0) return -EAGAIN; if (delay) msleep(delay); } } /** * t4_read_flash - read words from serial flash * @adapter: the adapter * @addr: the start address for the read * @nwords: how many 32-bit words to read * @data: where to store the read data * @byte_oriented: whether to store data as bytes or as words * * Read the specified number of 32-bit words from the serial flash. * If @byte_oriented is set the read data is stored as a byte array * (i.e., big-endian), otherwise as 32-bit words in the platform's * natural endianness. */ int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented) { int ret; if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) return -EINVAL; addr = swab32(addr) | SF_RD_DATA_FAST; if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) return ret; for ( ; nwords; nwords--, data++) { ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); if (nwords == 1) t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ if (ret) return ret; if (byte_oriented) *data = (__force __u32)(cpu_to_be32(*data)); } return 0; } /** * t4_write_flash - write up to a page of data to the serial flash * @adapter: the adapter * @addr: the start address to write * @n: length of data to write in bytes * @data: the data to write * @byte_oriented: whether to store data as bytes or as words * * Writes up to a page of data (256 bytes) to the serial flash starting * at the given address. All the data must be written to the same page. * If @byte_oriented is set the write data is stored as byte stream * (i.e. matches what on disk), otherwise in big-endian. 
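 *
 *	Worked example of the page constraint (illustrative numbers): a write
 *	of 200 bytes at address 0x10060 has offset 0x60 within its page, and
 *	0x60 + 200 = 296 > 256, so it is rejected with -EINVAL; trimming the
 *	write to 160 bytes ends it exactly on the page boundary, which is
 *	accepted.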
*/ int t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n, const u8 *data, int byte_oriented) { int ret; u32 buf[SF_PAGE_SIZE / 4]; unsigned int i, c, left, val, offset = addr & 0xff; if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) return -EINVAL; val = swab32(addr) | SF_PROG_PAGE; if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) goto unlock; for (left = n; left; left -= c) { c = min(left, 4U); for (val = 0, i = 0; i < c; ++i) val = (val << 8) + *data++; if (!byte_oriented) val = cpu_to_be32(val); ret = sf1_write(adapter, c, c != left, 1, val); if (ret) goto unlock; } ret = flash_wait_op(adapter, 8, 1); if (ret) goto unlock; t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ /* Read the page to verify the write succeeded */ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, byte_oriented); if (ret) return ret; if (memcmp(data - n, (u8 *)buf + offset, n)) { CH_ERR(adapter, "failed to correctly write the flash page at %#x\n", addr); return -EIO; } return 0; unlock: t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ return ret; } /** * t4_get_fw_version - read the firmware version * @adapter: the adapter * @vers: where to place the version * * Reads the FW version from flash. */ int t4_get_fw_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); } /** * t4_get_fw_hdr - read the firmware header * @adapter: the adapter * @hdr: where to place the version * * Reads the FW header from flash into caller provided buffer. */ int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr) { return t4_read_flash(adapter, FLASH_FW_START, sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1); } /** * t4_get_bs_version - read the firmware bootstrap version * @adapter: the adapter * @vers: where to place the version * * Reads the FW Bootstrap version from flash. */ int t4_get_bs_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); } /** * t4_get_tp_version - read the TP microcode version * @adapter: the adapter * @vers: where to place the version * * Reads the TP microcode version from flash. */ int t4_get_tp_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); } /** * t4_get_exprom_version - return the Expansion ROM version (if any) * @adapter: the adapter * @vers: where to place the version * * Reads the Expansion ROM header from FLASH and returns the version * number (if present) through the @vers return value pointer. We return * this in the Firmware Version Format since it's convenient. Return * 0 on success, -ENOENT if no Expansion ROM is present. 
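 *
 *	For example, a ROM whose header version bytes hdr_ver[] are
 *	{1, 2, 3, 4} is reported as version 1.2.3.4 via the
 *	V_FW_HDR_FW_VER_* packing below.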
*/ int t4_get_exprom_version(struct adapter *adap, u32 *vers) { struct exprom_header { unsigned char hdr_arr[16]; /* must start with 0x55aa */ unsigned char hdr_ver[4]; /* Expansion ROM version */ } *hdr; u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header), sizeof(u32))]; int ret; ret = t4_read_flash(adap, FLASH_EXP_ROM_START, ARRAY_SIZE(exprom_header_buf), exprom_header_buf, 0); if (ret) return ret; hdr = (struct exprom_header *)exprom_header_buf; if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa) return -ENOENT; *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) | V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) | V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) | V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3])); return 0; } /** * t4_get_scfg_version - return the Serial Configuration version * @adapter: the adapter * @vers: where to place the version * * Reads the Serial Configuration Version via the Firmware interface * (thus this can only be called once we're ready to issue Firmware * commands). The format of the Serial Configuration version is * adapter specific. Returns 0 on success, an error on failure. * * Note that early versions of the Firmware didn't include the ability * to retrieve the Serial Configuration version, so we zero-out the * return-value parameter in that case to avoid leaving it with * garbage in it. * * Also note that the Firmware will return its cached copy of the Serial * Initialization Revision ID, not the actual Revision ID as written in * the Serial EEPROM. This is only an issue if a new VPD has been written * and the Firmware/Chip haven't yet gone through a RESET sequence. So * it's best to defer calling this routine till after a FW_RESET_CMD has * been issued if the Host Driver will be performing a full adapter * initialization. */ int t4_get_scfg_version(struct adapter *adapter, u32 *vers) { u32 scfgrev_param; int ret; scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV)); ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, &scfgrev_param, vers); if (ret) *vers = 0; return ret; } /** * t4_get_vpd_version - return the VPD version * @adapter: the adapter * @vers: where to place the version * * Reads the VPD via the Firmware interface (thus this can only be called * once we're ready to issue Firmware commands). The format of the * VPD version is adapter specific. Returns 0 on success, an error on * failure. * * Note that early versions of the Firmware didn't include the ability * to retrieve the VPD version, so we zero-out the return-value parameter * in that case to avoid leaving it with garbage in it. * * Also note that the Firmware will return its cached copy of the VPD * Revision ID, not the actual Revision ID as written in the Serial * EEPROM. This is only an issue if a new VPD has been written and the * Firmware/Chip haven't yet gone through a RESET sequence. So it's best * to defer calling this routine till after a FW_RESET_CMD has been issued * if the Host Driver will be performing a full adapter initialization. 
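 *
 *	Like t4_get_scfg_version() above, this is a single-parameter
 *	t4_query_params() query; the output is zeroed on failure so callers
 *	can treat 0 as "version unavailable".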
*/ int t4_get_vpd_version(struct adapter *adapter, u32 *vers) { u32 vpdrev_param; int ret; vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV)); ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, &vpdrev_param, vers); if (ret) *vers = 0; return ret; } /** * t4_get_version_info - extract various chip/firmware version information * @adapter: the adapter * * Reads various chip/firmware version numbers and stores them into the * adapter Adapter Parameters structure. If any of the efforts fails * the first failure will be returned, but all of the version numbers * will be read. */ int t4_get_version_info(struct adapter *adapter) { int ret = 0; #define FIRST_RET(__getvinfo) \ do { \ int __ret = __getvinfo; \ if (__ret && !ret) \ ret = __ret; \ } while (0) FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); #undef FIRST_RET return ret; } /** * t4_flash_erase_sectors - erase a range of flash sectors * @adapter: the adapter * @start: the first sector to erase * @end: the last sector to erase * * Erases the sectors in the given inclusive range. */ int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) { int ret = 0; if (end >= adapter->params.sf_nsec) return -EINVAL; while (start <= end) { if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, 1, SF_ERASE_SECTOR | (start << 8))) != 0 || (ret = flash_wait_op(adapter, 14, 500)) != 0) { CH_ERR(adapter, "erase of flash sector %d failed, error %d\n", start, ret); break; } start++; } t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ return ret; } /** * t4_flash_cfg_addr - return the address of the flash configuration file * @adapter: the adapter * * Return the address within the flash where the Firmware Configuration * File is stored, or an error if the device FLASH is too small to contain * a Firmware Configuration File. */ int t4_flash_cfg_addr(struct adapter *adapter) { /* * If the device FLASH isn't large enough to hold a Firmware * Configuration File, return an error. */ if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE) return -ENOSPC; return FLASH_CFG_START; } /* * Return TRUE if the specified firmware matches the adapter. I.e. T4 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead * and emit an error message for mismatched firmware to save our caller the * effort ... */ static int t4_fw_matches_chip(struct adapter *adap, const struct fw_hdr *hdr) { /* * The expression below will return FALSE for any unsupported adapter * which will keep us "honest" in the future ... */ if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) || (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) || (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6)) return 1; CH_ERR(adap, "FW image (%d) is not suitable for this adapter (%d)\n", hdr->chip, chip_id(adap)); return 0; } /** * t4_load_fw - download firmware * @adap: the adapter * @fw_data: the firmware image to write * @size: image size * * Write the supplied firmware image to the card's serial flash. 
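 *
 *	The image is validated before anything is erased: its size must be a
 *	non-zero multiple of 512 bytes that matches the header's len512 field
 *	and fits the target FLASH region, and the sum of its big-endian
 *	32-bit words must equal 0xffffffff.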
*/ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) { u32 csum; int ret, addr; unsigned int i; u8 first_page[SF_PAGE_SIZE]; const u32 *p = (const u32 *)fw_data; const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; unsigned int fw_start_sec; unsigned int fw_start; unsigned int fw_size; if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) { fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC; fw_start = FLASH_FWBOOTSTRAP_START; fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE; } else { fw_start_sec = FLASH_FW_START_SEC; fw_start = FLASH_FW_START; fw_size = FLASH_FW_MAX_SIZE; } if (!size) { CH_ERR(adap, "FW image has no data\n"); return -EINVAL; } if (size & 511) { CH_ERR(adap, "FW image size not multiple of 512 bytes\n"); return -EINVAL; } if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) { CH_ERR(adap, "FW image size differs from size in FW header\n"); return -EINVAL; } if (size > fw_size) { CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size); return -EFBIG; } if (!t4_fw_matches_chip(adap, hdr)) return -EINVAL; for (csum = 0, i = 0; i < size / sizeof(csum); i++) csum += be32_to_cpu(p[i]); if (csum != 0xffffffff) { CH_ERR(adap, "corrupted firmware image, checksum %#x\n", csum); return -EINVAL; } i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); if (ret) goto out; /* * We write the correct version at the end so the driver can see a bad * version if the FW write fails. Start by writing a copy of the * first page with a bad version. */ memcpy(first_page, fw_data, SF_PAGE_SIZE); ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1); if (ret) goto out; addr = fw_start; for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; fw_data += SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1); if (ret) goto out; } ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver), sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1); out: if (ret) CH_ERR(adap, "firmware download failed, error %d\n", ret); return ret; } /** * t4_fwcache - firmware cache operation * @adap: the adapter * @op : the operation (flush or flush and invalidate) */ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) { struct fw_params_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(adap->pf) | V_FW_PARAMS_CMD_VFN(0)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c.param[0].mnem = cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE)); c.param[0].val = (__force __be32)op; return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); } void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr) { int i, j; u32 cfg, val, req, rsp; cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); if (cfg & F_LADBGEN) t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); val = t4_read_reg(adap, A_CIM_DEBUGSTS); req = G_POLADBGWRPTR(val); rsp = G_PILADBGWRPTR(val); if (pif_req_wrptr) *pif_req_wrptr = req; if (pif_rsp_wrptr) *pif_rsp_wrptr = rsp; for (i = 0; i < CIM_PIFLA_SIZE; i++) { for (j = 0; j < 6; j++) { t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) | V_PILADBGRDPTR(rsp)); *pif_req++ = t4_read_reg(adap, 
A_CIM_PO_LA_DEBUGDATA); *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA); req++; rsp++; } req = (req + 2) & M_POLADBGRDPTR; rsp = (rsp + 2) & M_PILADBGRDPTR; } t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); } void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp) { u32 cfg; int i, j, idx; cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); if (cfg & F_LADBGEN) t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); for (i = 0; i < CIM_MALA_SIZE; i++) { for (j = 0; j < 5; j++) { idx = 8 * i + j; t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) | V_PILADBGRDPTR(idx)); *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA); *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA); } } t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); } void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) { unsigned int i, j; for (i = 0; i < 8; i++) { u32 *p = la_buf + i; t4_write_reg(adap, A_ULP_RX_LA_CTL, i); j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR); t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j); for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8) *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA); } } /** * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits * @caps16: a 16-bit Port Capabilities value * * Returns the equivalent 32-bit Port Capabilities value. */ static uint32_t fwcaps16_to_caps32(uint16_t caps16) { uint32_t caps32 = 0; #define CAP16_TO_CAP32(__cap) \ do { \ if (caps16 & FW_PORT_CAP_##__cap) \ caps32 |= FW_PORT_CAP32_##__cap; \ } while (0) CAP16_TO_CAP32(SPEED_100M); CAP16_TO_CAP32(SPEED_1G); CAP16_TO_CAP32(SPEED_25G); CAP16_TO_CAP32(SPEED_10G); CAP16_TO_CAP32(SPEED_40G); CAP16_TO_CAP32(SPEED_100G); CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); CAP16_TO_CAP32(FORCE_PAUSE); CAP16_TO_CAP32(MDIAUTO); CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); CAP16_TO_CAP32(FEC_BASER_RS); CAP16_TO_CAP32(802_3_PAUSE); CAP16_TO_CAP32(802_3_ASM_DIR); #undef CAP16_TO_CAP32 return caps32; } /** * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits * @caps32: a 32-bit Port Capabilities value * * Returns the equivalent 16-bit Port Capabilities value. Note that * not all 32-bit Port Capabilities can be represented in the 16-bit * Port Capabilities and some fields/values may not make it. */ static uint16_t fwcaps32_to_caps16(uint32_t caps32) { uint16_t caps16 = 0; #define CAP32_TO_CAP16(__cap) \ do { \ if (caps32 & FW_PORT_CAP32_##__cap) \ caps16 |= FW_PORT_CAP_##__cap; \ } while (0) CAP32_TO_CAP16(SPEED_100M); CAP32_TO_CAP16(SPEED_1G); CAP32_TO_CAP16(SPEED_10G); CAP32_TO_CAP16(SPEED_25G); CAP32_TO_CAP16(SPEED_40G); CAP32_TO_CAP16(SPEED_100G); CAP32_TO_CAP16(FC_RX); CAP32_TO_CAP16(FC_TX); CAP32_TO_CAP16(802_3_PAUSE); CAP32_TO_CAP16(802_3_ASM_DIR); CAP32_TO_CAP16(ANEG); CAP32_TO_CAP16(FORCE_PAUSE); CAP32_TO_CAP16(MDIAUTO); CAP32_TO_CAP16(MDISTRAIGHT); CAP32_TO_CAP16(FEC_RS); CAP32_TO_CAP16(FEC_BASER_RS); #undef CAP32_TO_CAP16 return caps16; } static bool is_bt(struct port_info *pi) { return (pi->port_type == FW_PORT_TYPE_BT_SGMII || pi->port_type == FW_PORT_TYPE_BT_XFI || pi->port_type == FW_PORT_TYPE_BT_XAUI); } /** * t4_link_l1cfg - apply link configuration to MAC/PHY * @phy: the PHY to setup * @mac: the MAC to setup * @lc: the requested link configuration * * Set up a port's MAC and PHY according to a desired link configuration. * - If the PHY can auto-negotiate first decide what to advertise, then * enable/disable auto-negotiation as desired, and reset. * - If the PHY does not auto-negotiate just reset it. 
* - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. */ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO); unsigned int aneg, fc, fec, speed, rcap; fc = 0; if (lc->requested_fc & PAUSE_RX) fc |= FW_PORT_CAP32_FC_RX; if (lc->requested_fc & PAUSE_TX) fc |= FW_PORT_CAP32_FC_TX; if (!(lc->requested_fc & PAUSE_AUTONEG)) fc |= FW_PORT_CAP32_FORCE_PAUSE; fec = 0; if (lc->requested_fec == FEC_AUTO) fec = lc->fec_hint; else { if (lc->requested_fec & FEC_RS) fec |= FW_PORT_CAP32_FEC_RS; if (lc->requested_fec & FEC_BASER_RS) fec |= FW_PORT_CAP32_FEC_BASER_RS; } if (lc->requested_aneg == AUTONEG_DISABLE) aneg = 0; else if (lc->requested_aneg == AUTONEG_ENABLE) aneg = FW_PORT_CAP32_ANEG; else aneg = lc->supported & FW_PORT_CAP32_ANEG; if (aneg) { speed = lc->supported & V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED); } else if (lc->requested_speed != 0) speed = speed_to_fwcap(lc->requested_speed); else speed = fwcap_top_speed(lc->supported); /* Force AN on for BT cards. */ if (is_bt(adap->port[adap->chan_map[port]])) aneg = lc->supported & FW_PORT_CAP32_ANEG; rcap = aneg | speed | fc | fec; if ((rcap | lc->supported) != lc->supported) { #ifdef INVARIANTS CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap, lc->supported); #endif rcap &= lc->supported; } rcap |= mdi; memset(&c, 0, sizeof(c)); c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port)); if (adap->params.port_caps32) { c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) | FW_LEN16(c)); c.u.l1cfg32.rcap32 = cpu_to_be32(rcap); } else { c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); } return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); } /** * t4_restart_aneg - restart autonegotiation * @adap: the adapter * @mbox: mbox to use for the FW command * @port: the port id * * Restarts autonegotiation for the selected port. 
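 *
 *	Note that unlike t4_link_l1cfg() above, this helper always issues the
 *	16-bit L1_CFG action with only FW_PORT_CAP_ANEG set, regardless of
 *	whether the firmware supports 32-bit port capabilities.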
*/ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) { struct fw_port_cmd c; memset(&c, 0, sizeof(c)); c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port)); c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } struct intr_details { u32 mask; const char *msg; }; struct intr_action { u32 mask; int arg; bool (*action)(struct adapter *, int, bool); }; #define NONFATAL_IF_DISABLED 1 struct intr_info { const char *name; /* name of the INT_CAUSE register */ int cause_reg; /* INT_CAUSE register */ int enable_reg; /* INT_ENABLE register */ u32 fatal; /* bits that are fatal */ int flags; /* hints */ const struct intr_details *details; const struct intr_action *actions; }; static inline char intr_alert_char(u32 cause, u32 enable, u32 fatal) { if (cause & fatal) return ('!'); if (cause & enable) return ('*'); return ('-'); } static void t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause) { u32 enable, fatal, leftover; const struct intr_details *details; char alert; enable = t4_read_reg(adap, ii->enable_reg); if (ii->flags & NONFATAL_IF_DISABLED) fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg); else fatal = ii->fatal; alert = intr_alert_char(cause, enable, fatal); CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n", alert, ii->name, ii->cause_reg, cause, enable, fatal); leftover = cause; for (details = ii->details; details && details->mask != 0; details++) { u32 msgbits = details->mask & cause; if (msgbits == 0) continue; alert = intr_alert_char(msgbits, enable, ii->fatal); CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits, details->msg); leftover &= ~msgbits; } if (leftover != 0 && leftover != cause) CH_ALERT(adap, " ? [0x%08x]\n", leftover); } /* * Returns true for fatal error. */ static bool t4_handle_intr(struct adapter *adap, const struct intr_info *ii, u32 additional_cause, bool verbose) { u32 cause, fatal; bool rc; const struct intr_action *action; /* read and display cause. */ cause = t4_read_reg(adap, ii->cause_reg); if (verbose || cause != 0) t4_show_intr_info(adap, ii, cause); /* * The top level interrupt cause is a bit special and we need to ignore * the bits that are not in the enable. Note that we did display them * above in t4_show_intr_info but will not clear them. */ if (ii->cause_reg == A_PL_INT_CAUSE) cause &= t4_read_reg(adap, ii->enable_reg); fatal = cause & ii->fatal; if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED) fatal &= t4_read_reg(adap, ii->enable_reg); cause |= additional_cause; if (cause == 0) return (false); rc = fatal != 0; for (action = ii->actions; action && action->mask != 0; action++) { if (!(action->mask & cause)) continue; rc |= (action->action)(adap, action->arg, verbose); } /* clear */ t4_write_reg(adap, ii->cause_reg, cause); (void)t4_read_reg(adap, ii->cause_reg); return (rc); } /* * Interrupt handler for the PCIE module. 
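 *
 * Like all the handlers below, this is a thin wrapper around
 * t4_handle_intr(): each block describes its INT_CAUSE register with an
 * intr_info (fatal mask, optional per-bit details, optional bit-triggered
 * actions) and lets the common code read, decode, act on, and clear the
 * cause register.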
*/ static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details sysbus_intr_details[] = { { F_RNPP, "RXNP array parity error" }, { F_RPCP, "RXPC array parity error" }, { F_RCIP, "RXCIF array parity error" }, { F_RCCP, "Rx completions control array parity error" }, { F_RFTP, "RXFT array parity error" }, { 0 } }; static const struct intr_info sysbus_intr_info = { .name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS", .cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, .enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE, .fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP, .flags = 0, .details = sysbus_intr_details, .actions = NULL, }; static const struct intr_details pcie_port_intr_details[] = { { F_TPCP, "TXPC array parity error" }, { F_TNPP, "TXNP array parity error" }, { F_TFTP, "TXFT array parity error" }, { F_TCAP, "TXCA array parity error" }, { F_TCIP, "TXCIF array parity error" }, { F_RCAP, "RXCA array parity error" }, { F_OTDD, "outbound request TLP discarded" }, { F_RDPE, "Rx data parity error" }, { F_TDUE, "Tx uncorrectable data error" }, { 0 } }; static const struct intr_info pcie_port_intr_info = { .name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS", .cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, .enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE, .fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP | F_OTDD | F_RDPE | F_TDUE, .flags = 0, .details = pcie_port_intr_details, .actions = NULL, }; static const struct intr_details pcie_intr_details[] = { { F_MSIADDRLPERR, "MSI AddrL parity error" }, { F_MSIADDRHPERR, "MSI AddrH parity error" }, { F_MSIDATAPERR, "MSI data parity error" }, { F_MSIXADDRLPERR, "MSI-X AddrL parity error" }, { F_MSIXADDRHPERR, "MSI-X AddrH parity error" }, { F_MSIXDATAPERR, "MSI-X data parity error" }, { F_MSIXDIPERR, "MSI-X DI parity error" }, { F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" }, { F_PIOREQPERR, "PCIe PIO request FIFO parity error" }, { F_TARTAGPERR, "PCIe target tag FIFO parity error" }, { F_CCNTPERR, "PCIe CMD channel count parity error" }, { F_CREQPERR, "PCIe CMD channel request parity error" }, { F_CRSPPERR, "PCIe CMD channel response parity error" }, { F_DCNTPERR, "PCIe DMA channel count parity error" }, { F_DREQPERR, "PCIe DMA channel request parity error" }, { F_DRSPPERR, "PCIe DMA channel response parity error" }, { F_HCNTPERR, "PCIe HMA channel count parity error" }, { F_HREQPERR, "PCIe HMA channel request parity error" }, { F_HRSPPERR, "PCIe HMA channel response parity error" }, { F_CFGSNPPERR, "PCIe config snoop FIFO parity error" }, { F_FIDPERR, "PCIe FID parity error" }, { F_INTXCLRPERR, "PCIe INTx clear parity error" }, { F_MATAGPERR, "PCIe MA tag parity error" }, { F_PIOTAGPERR, "PCIe PIO tag parity error" }, { F_RXCPLPERR, "PCIe Rx completion parity error" }, { F_RXWRPERR, "PCIe Rx write parity error" }, { F_RPLPERR, "PCIe replay buffer parity error" }, { F_PCIESINT, "PCIe core secondary fault" }, { F_PCIEPINT, "PCIe core primary fault" }, { F_UNXSPLCPLERR, "PCIe unexpected split completion error" }, { 0 } }; static const struct intr_details t5_pcie_intr_details[] = { { F_IPGRPPERR, "Parity errors observed by IP" }, { F_NONFATALERR, "PCIe non-fatal error" }, { F_READRSPERR, "Outbound read error" }, { F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" }, { F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" }, { F_IPRETRYPERR, "PCIe IP replay buffer parity error" }, { F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" }, { 
F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
		{ F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
		{ F_MAGRPPERR, "MA group FIFO parity error" },
		{ F_VFIDPERR, "VFID SRAM parity error" },
		{ F_FIDPERR, "FID SRAM parity error" },
		{ F_CFGSNPPERR, "config snoop FIFO parity error" },
		{ F_HRSPPERR, "HMA channel response data SRAM parity error" },
		{ F_HREQRDPERR, "HMA channel read request SRAM parity error" },
		{ F_HREQWRPERR,
		    "HMA channel write request SRAM parity error" },
		{ F_DRSPPERR, "DMA channel response data SRAM parity error" },
		{ F_DREQRDPERR, "DMA channel read request SRAM parity error" },
		{ F_CRSPPERR, "CMD channel response data SRAM parity error" },
		{ F_CREQRDPERR, "CMD channel read request SRAM parity error" },
		{ F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
		{ F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
		{ F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
		{ F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
		{ F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
		{ F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
		{ F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
		{ F_MSTGRPPERR,
		    "Master response read queue SRAM parity error" },
		{ 0 }
	};
	struct intr_info pcie_intr_info = {
		.name = "PCIE_INT_CAUSE",
		.cause_reg = A_PCIE_INT_CAUSE,
		.enable_reg = A_PCIE_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	if (is_t4(adap)) {
		fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
		fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0,
		    verbose);

		pcie_intr_info.details = pcie_intr_details;
	} else {
		pcie_intr_info.details = t5_pcie_intr_details;
	}
	fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);

	return (fatal);
}

/*
 * TP interrupt handler.
 */
static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details tp_intr_details[] = {
		{ 0x3fffffff, "TP parity error" },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
		{ 0 }
	};
	static const struct intr_info tp_intr_info = {
		.name = "TP_INT_CAUSE",
		.cause_reg = A_TP_INT_CAUSE,
		.enable_reg = A_TP_INT_ENABLE,
		.fatal = 0x7fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = tp_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
}

/*
 * SGE interrupt handler.
 */
static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_info sge_int1_info = {
		.name = "SGE_INT_CAUSE1",
		.cause_reg = A_SGE_INT_CAUSE1,
		.enable_reg = A_SGE_INT_ENABLE1,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int2_info = {
		.name = "SGE_INT_CAUSE2",
		.cause_reg = A_SGE_INT_CAUSE2,
		.enable_reg = A_SGE_INT_ENABLE2,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_details sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
		    "DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
		    "Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
		    "SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		    "SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		    "SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 |
		    F_ERR_BAD_DB_PIDX1 | F_ERR_BAD_DB_PIDX0,
		    "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
		    "Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
		    "Egress context manager priority user error" },
		{ F_DBFIFO_HP_INT,
		    "High priority DB FIFO threshold reached" },
		{ F_DBFIFO_LP_INT,
		    "Low priority DB FIFO threshold reached" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	static const struct intr_details t6_sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
		    "DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
		    "Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
		    "SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		    "SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		    "SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 |
		    F_ERR_BAD_DB_PIDX1 | F_ERR_BAD_DB_PIDX0,
		    "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
		    "Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
		    "Egress context manager priority user error" },
		{ F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
		{ F_FATAL_WRE_LEN,
		    "SGE WRE packet less than advertised length" },
		{
F_REG_ADDRESS_ERR, "Undefined SGE register accessed" }, { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" }, { F_EGRESS_SIZE_ERR, "SGE illegal egress QID" }, { 0x0000000f, "SGE context access for invalid queue" }, { 0 } }; struct intr_info sge_int3_info = { .name = "SGE_INT_CAUSE3", .cause_reg = A_SGE_INT_CAUSE3, .enable_reg = A_SGE_INT_ENABLE3, .fatal = F_ERR_CPL_EXCEED_IQE_SIZE, .flags = 0, .details = NULL, .actions = NULL, }; static const struct intr_info sge_int4_info = { .name = "SGE_INT_CAUSE4", .cause_reg = A_SGE_INT_CAUSE4, .enable_reg = A_SGE_INT_ENABLE4, .fatal = 0, .flags = 0, .details = NULL, .actions = NULL, }; static const struct intr_info sge_int5_info = { .name = "SGE_INT_CAUSE5", .cause_reg = A_SGE_INT_CAUSE5, .enable_reg = A_SGE_INT_ENABLE5, .fatal = 0xffffffff, .flags = NONFATAL_IF_DISABLED, .details = NULL, .actions = NULL, }; static const struct intr_info sge_int6_info = { .name = "SGE_INT_CAUSE6", .cause_reg = A_SGE_INT_CAUSE6, .enable_reg = A_SGE_INT_ENABLE6, .fatal = 0, .flags = 0, .details = NULL, .actions = NULL, }; bool fatal; u32 v; if (chip_id(adap) <= CHELSIO_T5) { sge_int3_info.details = sge_int3_details; } else { sge_int3_info.details = t6_sge_int3_details; } fatal = false; fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose); fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose); fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose); fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose); if (chip_id(adap) >= CHELSIO_T5) fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose); if (chip_id(adap) >= CHELSIO_T6) fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose); v = t4_read_reg(adap, A_SGE_ERROR_STATS); if (v & F_ERROR_QID_VALID) { CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v)); if (v & F_UNCAPTURED_ERROR) CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n"); t4_write_reg(adap, A_SGE_ERROR_STATS, F_ERROR_QID_VALID | F_UNCAPTURED_ERROR); } return (fatal); } /* * CIM interrupt handler. 
*/
static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_action cim_host_intr_actions[] = {
		{ F_TIMER0INT, 0, t4_os_dump_cimla },
		{ 0 },
	};
	static const struct intr_details cim_host_intr_details[] = {
		/* T6+ */
		{ F_PCIE2CIMINTFPARERR,
		    "CIM IBQ PCIe interface parity error" },

		/* T5+ */
		{ F_MA_CIM_INTFPERR, "MA2CIM interface parity error" },
		{ F_PLCIM_MSTRSPDATAPARERR,
		    "PL2CIM master response data parity error" },
		{ F_NCSI2CIMINTFPARERR,
		    "CIM IBQ NC-SI interface parity error" },
		{ F_SGE2CIMINTFPARERR,
		    "CIM IBQ SGE interface parity error" },
		{ F_ULP2CIMINTFPARERR,
		    "CIM IBQ ULP_TX interface parity error" },
		{ F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" },
		{ F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" },
		{ F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" },

		/* T4+ */
		{ F_TIEQOUTPARERRINT,
		    "CIM TIEQ outgoing FIFO parity error" },
		{ F_TIEQINPARERRINT,
		    "CIM TIEQ incoming FIFO parity error" },
		{ F_MBHOSTPARERR, "CIM mailbox host read parity error" },
		{ F_MBUPPARERR, "CIM mailbox uP parity error" },
		{ F_IBQTP0PARERR, "CIM IBQ TP0 parity error" },
		{ F_IBQTP1PARERR, "CIM IBQ TP1 parity error" },
		{ F_IBQULPPARERR, "CIM IBQ ULP parity error" },
		{ F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" },
		{ F_IBQSGEHIPARERR | F_IBQPCIEPARERR,	/* same bit */
		    "CIM IBQ PCIe/SGE_HI parity error" },
		{ F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" },
		{ F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" },
		{ F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" },
		{ F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" },
		{ F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" },
		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error" },
		{ F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" },
		{ F_TIMER1INT, "CIM TIMER1 interrupt" },
		{ F_TIMER0INT, "CIM TIMER0 interrupt" },
		{ F_PREFDROPINT, "CIM control register prefetch drop" },
		{ 0 }
	};
	static const struct intr_info cim_host_intr_info = {
		.name = "CIM_HOST_INT_CAUSE",
		.cause_reg = A_CIM_HOST_INT_CAUSE,
		.enable_reg = A_CIM_HOST_INT_ENABLE,
		.fatal = 0x007fffe6,
		.flags = NONFATAL_IF_DISABLED,
		.details = cim_host_intr_details,
		.actions = cim_host_intr_actions,
	};
	static const struct intr_details cim_host_upacc_intr_details[] = {
		{ F_EEPROMWRINT, "CIM EEPROM came out of busy state" },
		{ F_TIMEOUTMAINT, "CIM PIF MA timeout" },
		{ F_TIMEOUTINT, "CIM PIF timeout" },
		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" },
		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" },
		{ F_BLKWRPLINT, "CIM block write to PL space" },
		{ F_BLKRDPLINT, "CIM block read from PL space" },
		{ F_SGLWRPLINT,
		    "CIM single write to PL space with illegal BEs" },
		{ F_SGLRDPLINT,
		    "CIM single read from PL space with illegal BEs" },
		{ F_BLKWRCTLINT, "CIM block write to CTL space" },
		{ F_BLKRDCTLINT, "CIM block read from CTL space" },
		{ F_SGLWRCTLINT,
		    "CIM single write to CTL space with illegal BEs" },
		{ F_SGLRDCTLINT,
		    "CIM single read from CTL space with illegal BEs" },
		{ F_BLKWREEPROMINT, "CIM block write to EEPROM space" },
		{ F_BLKRDEEPROMINT, "CIM block read from EEPROM space" },
		{ F_SGLWREEPROMINT,
		    "CIM single write to EEPROM space with illegal BEs" },
		{ F_SGLRDEEPROMINT,
		    "CIM single read from EEPROM space with illegal BEs" },
		{ F_BLKWRFLASHINT, "CIM block write to flash space" },
		{ F_BLKRDFLASHINT, "CIM block read from flash space" },
		{ F_SGLWRFLASHINT, "CIM single write to flash space" },
		{ F_SGLRDFLASHINT,
		    "CIM single read from flash space with illegal BEs" },
		{ F_BLKWRBOOTINT, "CIM block write to boot space" },
		{ F_BLKRDBOOTINT, "CIM block read from boot space" },
		{ F_SGLWRBOOTINT,
"CIM single write to boot space" }, { F_SGLRDBOOTINT, "CIM single read from boot space with illegal BEs" }, { F_ILLWRBEINT, "CIM illegal write BEs" }, { F_ILLRDBEINT, "CIM illegal read BEs" }, { F_ILLRDINT, "CIM illegal read" }, { F_ILLWRINT, "CIM illegal write" }, { F_ILLTRANSINT, "CIM illegal transaction" }, { F_RSVDSPACEINT, "CIM reserved space access" }, {0} }; static const struct intr_info cim_host_upacc_intr_info = { .name = "CIM_HOST_UPACC_INT_CAUSE", .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE, .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE, .fatal = 0x3fffeeff, .flags = NONFATAL_IF_DISABLED, .details = cim_host_upacc_intr_details, .actions = NULL, }; static const struct intr_info cim_pf_host_intr_info = { .name = "CIM_PF_HOST_INT_CAUSE", .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE), .fatal = 0, .flags = 0, .details = NULL, .actions = NULL, }; u32 val, fw_err; bool fatal; fw_err = t4_read_reg(adap, A_PCIE_FW); if (fw_err & F_PCIE_FW_ERR) t4_report_fw_error(adap); /* * When the Firmware detects an internal error which normally wouldn't * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order * to make sure the Host sees the Firmware Crash. So if we have a * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0 * interrupt. */ val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE); if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) || G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) { t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT); } fatal = false; fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose); return (fatal); } /* * ULP RX interrupt handler. */ static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details ulprx_intr_details[] = { /* T5+ */ { F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" }, { F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" }, /* T4+ */ { F_CAUSE_CTX_1, "ULPRX channel 1 context error" }, { F_CAUSE_CTX_0, "ULPRX channel 0 context error" }, { 0x007fffff, "ULPRX parity error" }, { 0 } }; static const struct intr_info ulprx_intr_info = { .name = "ULP_RX_INT_CAUSE", .cause_reg = A_ULP_RX_INT_CAUSE, .enable_reg = A_ULP_RX_INT_ENABLE, .fatal = 0x07ffffff, .flags = NONFATAL_IF_DISABLED, .details = ulprx_intr_details, .actions = NULL, }; static const struct intr_info ulprx_intr2_info = { .name = "ULP_RX_INT_CAUSE_2", .cause_reg = A_ULP_RX_INT_CAUSE_2, .enable_reg = A_ULP_RX_INT_ENABLE_2, .fatal = 0, .flags = 0, .details = NULL, .actions = NULL, }; bool fatal = false; fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose); return (fatal); } /* * ULP TX interrupt handler. 
*/ static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details ulptx_intr_details[] = { { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" }, { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" }, { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" }, { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" }, { 0x0fffffff, "ULPTX parity error" }, { 0 } }; static const struct intr_info ulptx_intr_info = { .name = "ULP_TX_INT_CAUSE", .cause_reg = A_ULP_TX_INT_CAUSE, .enable_reg = A_ULP_TX_INT_ENABLE, .fatal = 0x0fffffff, .flags = NONFATAL_IF_DISABLED, .details = ulptx_intr_details, .actions = NULL, }; static const struct intr_info ulptx_intr2_info = { .name = "ULP_TX_INT_CAUSE_2", .cause_reg = A_ULP_TX_INT_CAUSE_2, .enable_reg = A_ULP_TX_INT_ENABLE_2, .fatal = 0xf0, .flags = NONFATAL_IF_DISABLED, .details = NULL, .actions = NULL, }; bool fatal = false; fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose); return (fatal); } static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose) { int i; u32 data[17]; t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0], ARRAY_SIZE(data), A_PM_TX_DBG_STAT0); for (i = 0; i < ARRAY_SIZE(data); i++) { CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i, A_PM_TX_DBG_STAT0 + i, data[i]); } return (false); } /* * PM TX interrupt handler. */ static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_action pmtx_intr_actions[] = { { 0xffffffff, 0, pmtx_dump_dbg_stats }, { 0 }, }; static const struct intr_details pmtx_intr_details[] = { { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" }, { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" }, { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" }, { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" }, { 0x0f000000, "PMTX icspi FIFO2X Rx framing error" }, { 0x00f00000, "PMTX icspi FIFO Rx framing error" }, { 0x000f0000, "PMTX icspi FIFO Tx framing error" }, { 0x0000f000, "PMTX oespi FIFO Rx framing error" }, { 0x00000f00, "PMTX oespi FIFO Tx framing error" }, { 0x000000f0, "PMTX oespi FIFO2X Tx framing error" }, { F_OESPI_PAR_ERROR, "PMTX oespi parity error" }, { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" }, { F_ICSPI_PAR_ERROR, "PMTX icspi parity error" }, { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" }, { 0 } }; static const struct intr_info pmtx_intr_info = { .name = "PM_TX_INT_CAUSE", .cause_reg = A_PM_TX_INT_CAUSE, .enable_reg = A_PM_TX_INT_ENABLE, .fatal = 0xffffffff, .flags = 0, .details = pmtx_intr_details, .actions = pmtx_intr_actions, }; return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose)); } /* * PM RX interrupt handler. 
*/ static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details pmrx_intr_details[] = { /* T6+ */ { 0x18000000, "PMRX ospi overflow" }, { F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" }, { F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" }, { F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" }, { F_SDC_ERR, "PMRX SDC error" }, /* T4+ */ { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" }, { 0x003c0000, "PMRX iespi FIFO2X Rx framing error" }, { 0x0003c000, "PMRX iespi Rx framing error" }, { 0x00003c00, "PMRX iespi Tx framing error" }, { 0x00000300, "PMRX ocspi Rx framing error" }, { 0x000000c0, "PMRX ocspi Tx framing error" }, { 0x00000030, "PMRX ocspi FIFO2X Tx framing error" }, { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" }, { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" }, { F_IESPI_PAR_ERROR, "PMRX iespi parity error" }, { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"}, { 0 } }; static const struct intr_info pmrx_intr_info = { .name = "PM_RX_INT_CAUSE", .cause_reg = A_PM_RX_INT_CAUSE, .enable_reg = A_PM_RX_INT_ENABLE, .fatal = 0x1fffffff, .flags = NONFATAL_IF_DISABLED, .details = pmrx_intr_details, .actions = NULL, }; return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose)); } /* * CPL switch interrupt handler. */ static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details cplsw_intr_details[] = { /* T5+ */ { F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" }, { F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" }, /* T4+ */ { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" }, { F_CIM_OVFL_ERROR, "CPLSW CIM overflow" }, { F_TP_FRAMING_ERROR, "CPLSW TP framing error" }, { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" }, { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" }, { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" }, { 0 } }; static const struct intr_info cplsw_intr_info = { .name = "CPL_INTR_CAUSE", .cause_reg = A_CPL_INTR_CAUSE, .enable_reg = A_CPL_INTR_ENABLE, .fatal = 0xff, .flags = NONFATAL_IF_DISABLED, .details = cplsw_intr_details, .actions = NULL, }; return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose)); } #define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR) #define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR) #define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \ F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \ F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \ F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR) #define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \ F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \ F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR) /* * LE interrupt handler. 
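 *
 * LE is the lookup engine.  T6 rearranged most of the cause bits, so the
 * chip-specific details and fatal mask are installed at run time below
 * (T5_LE_FATAL_MASK for T4/T5, T6_LE_FATAL_MASK for T6).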
*/ static bool le_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details le_intr_details[] = { { F_REQQPARERR, "LE request queue parity error" }, { F_UNKNOWNCMD, "LE unknown command" }, { F_ACTRGNFULL, "LE active region full" }, { F_PARITYERR, "LE parity error" }, { F_LIPMISS, "LE LIP miss" }, { F_LIP0, "LE 0 LIP error" }, { 0 } }; static const struct intr_details t6_le_intr_details[] = { { F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" }, { F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" }, { F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" }, { F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" }, { F_TOTCNTERR, "LE total active < TCAM count" }, { F_CMDPRSRINTERR, "LE internal error in parser" }, { F_CMDTIDERR, "Incorrect tid in LE command" }, { F_T6_ACTRGNFULL, "LE active region full" }, { F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" }, { F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" }, { F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" }, { F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" }, { F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" }, { F_TCAMACCFAIL, "LE TCAM access failure" }, { F_T6_UNKNOWNCMD, "LE unknown command" }, { F_T6_LIP0, "LE found 0 LIP during CLIP substitution" }, { F_T6_LIPMISS, "LE CLIP lookup miss" }, { T6_LE_PERRCRC_MASK, "LE parity/CRC error" }, { 0 } }; struct intr_info le_intr_info = { .name = "LE_DB_INT_CAUSE", .cause_reg = A_LE_DB_INT_CAUSE, .enable_reg = A_LE_DB_INT_ENABLE, .fatal = 0, .flags = NONFATAL_IF_DISABLED, .details = NULL, .actions = NULL, }; if (chip_id(adap) <= CHELSIO_T5) { le_intr_info.details = le_intr_details; le_intr_info.fatal = T5_LE_FATAL_MASK; } else { le_intr_info.details = t6_le_intr_details; le_intr_info.fatal = T6_LE_FATAL_MASK; } return (t4_handle_intr(adap, &le_intr_info, 0, verbose)); } /* * MPS interrupt handler. 
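 *
 * MPS reports errors through several independent cause registers (Rx
 * parity, Tx, trace, statistics SRAM/FIFOs, classification).  The handler
 * below walks all of them and then clears the top-level A_MPS_INT_CAUSE
 * summary register.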
*/ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details mps_rx_perr_intr_details[] = { { 0xffffffff, "MPS Rx parity error" }, { 0 } }; static const struct intr_info mps_rx_perr_intr_info = { .name = "MPS_RX_PERR_INT_CAUSE", .cause_reg = A_MPS_RX_PERR_INT_CAUSE, .enable_reg = A_MPS_RX_PERR_INT_ENABLE, .fatal = 0xffffffff, .flags = NONFATAL_IF_DISABLED, .details = mps_rx_perr_intr_details, .actions = NULL, }; static const struct intr_details mps_tx_intr_details[] = { { F_PORTERR, "MPS Tx destination port is disabled" }, { F_FRMERR, "MPS Tx framing error" }, { F_SECNTERR, "MPS Tx SOP/EOP error" }, { F_BUBBLE, "MPS Tx underflow" }, { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" }, { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" }, { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" }, { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" }, { 0 } }; static const struct intr_info mps_tx_intr_info = { .name = "MPS_TX_INT_CAUSE", .cause_reg = A_MPS_TX_INT_CAUSE, .enable_reg = A_MPS_TX_INT_ENABLE, .fatal = 0x1ffff, .flags = NONFATAL_IF_DISABLED, .details = mps_tx_intr_details, .actions = NULL, }; static const struct intr_details mps_trc_intr_details[] = { { F_MISCPERR, "MPS TRC misc parity error" }, { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" }, { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" }, { 0 } }; static const struct intr_info mps_trc_intr_info = { .name = "MPS_TRC_INT_CAUSE", .cause_reg = A_MPS_TRC_INT_CAUSE, .enable_reg = A_MPS_TRC_INT_ENABLE, .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM), .flags = 0, .details = mps_trc_intr_details, .actions = NULL, }; static const struct intr_details mps_stat_sram_intr_details[] = { { 0xffffffff, "MPS statistics SRAM parity error" }, { 0 } }; static const struct intr_info mps_stat_sram_intr_info = { .name = "MPS_STAT_PERR_INT_CAUSE_SRAM", .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM, .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM, .fatal = 0x1fffffff, .flags = NONFATAL_IF_DISABLED, .details = mps_stat_sram_intr_details, .actions = NULL, }; static const struct intr_details mps_stat_tx_intr_details[] = { { 0xffffff, "MPS statistics Tx FIFO parity error" }, { 0 } }; static const struct intr_info mps_stat_tx_intr_info = { .name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO", .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO, .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO, .fatal = 0xffffff, .flags = NONFATAL_IF_DISABLED, .details = mps_stat_tx_intr_details, .actions = NULL, }; static const struct intr_details mps_stat_rx_intr_details[] = { { 0xffffff, "MPS statistics Rx FIFO parity error" }, { 0 } }; static const struct intr_info mps_stat_rx_intr_info = { .name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO", .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO, .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO, .fatal = 0xffffff, .flags = 0, .details = mps_stat_rx_intr_details, .actions = NULL, }; static const struct intr_details mps_cls_intr_details[] = { { F_HASHSRAM, "MPS hash SRAM parity error" }, { F_MATCHTCAM, "MPS match TCAM parity error" }, { F_MATCHSRAM, "MPS match SRAM parity error" }, { 0 } }; static const struct intr_info mps_cls_intr_info = { .name = "MPS_CLS_INT_CAUSE", .cause_reg = A_MPS_CLS_INT_CAUSE, .enable_reg = A_MPS_CLS_INT_ENABLE, .fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM, .flags = 0, .details = mps_cls_intr_details, .actions = NULL, }; static const struct intr_details mps_stat_sram1_intr_details[] = { { 0xff, "MPS statistics SRAM1 parity 
error" }, { 0 } }; static const struct intr_info mps_stat_sram1_intr_info = { .name = "MPS_STAT_PERR_INT_CAUSE_SRAM1", .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1, .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1, .fatal = 0xff, .flags = 0, .details = mps_stat_sram1_intr_details, .actions = NULL, }; bool fatal; fatal = false; fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose); if (chip_id(adap) > CHELSIO_T4) { fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0, verbose); } t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff); t4_read_reg(adap, A_MPS_INT_CAUSE); /* flush */ return (fatal); } /* * EDC/MC interrupt handler. */ static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose) { static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" }; unsigned int count_reg, v; static const struct intr_details mem_intr_details[] = { { F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" }, { F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" }, { F_PERR_INT_CAUSE, "FIFO parity error" }, { 0 } }; struct intr_info ii = { .fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE, .details = mem_intr_details, .flags = 0, .actions = NULL, }; bool fatal; switch (idx) { case MEM_EDC0: ii.name = "EDC0_INT_CAUSE"; ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0); ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0); count_reg = EDC_REG(A_EDC_ECC_STATUS, 0); break; case MEM_EDC1: ii.name = "EDC1_INT_CAUSE"; ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1); ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1); count_reg = EDC_REG(A_EDC_ECC_STATUS, 1); break; case MEM_MC0: ii.name = "MC0_INT_CAUSE"; if (is_t4(adap)) { ii.cause_reg = A_MC_INT_CAUSE; ii.enable_reg = A_MC_INT_ENABLE; count_reg = A_MC_ECC_STATUS; } else { ii.cause_reg = A_MC_P_INT_CAUSE; ii.enable_reg = A_MC_P_INT_ENABLE; count_reg = A_MC_P_ECC_STATUS; } break; case MEM_MC1: ii.name = "MC1_INT_CAUSE"; ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1); ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1); count_reg = MC_REG(A_MC_P_ECC_STATUS, 1); break; } fatal = t4_handle_intr(adap, &ii, 0, verbose); v = t4_read_reg(adap, count_reg); if (v != 0) { if (G_ECC_UECNT(v) != 0) { CH_ALERT(adap, "%s: %u uncorrectable ECC data error(s)\n", name[idx], G_ECC_UECNT(v)); } if (G_ECC_CECNT(v) != 0) { if (idx <= MEM_EDC1) t4_edc_err_read(adap, idx); CH_WARN_RATELIMIT(adap, "%s: %u correctable ECC data error(s)\n", name[idx], G_ECC_CECNT(v)); } t4_write_reg(adap, count_reg, 0xffffffff); } return (fatal); } static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose) { u32 v; v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS); CH_ALERT(adap, "MA address wrap-around error by client %u to address %#x\n", G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4); t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v); return (false); } /* * MA interrupt handler. 
*/ static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_action ma_intr_actions[] = { { F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status }, { 0 }, }; static const struct intr_info ma_intr_info = { .name = "MA_INT_CAUSE", .cause_reg = A_MA_INT_CAUSE, .enable_reg = A_MA_INT_ENABLE, .fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE, .flags = NONFATAL_IF_DISABLED, .details = NULL, .actions = ma_intr_actions, }; static const struct intr_info ma_perr_status1 = { .name = "MA_PARITY_ERROR_STATUS1", .cause_reg = A_MA_PARITY_ERROR_STATUS1, .enable_reg = A_MA_PARITY_ERROR_ENABLE1, .fatal = 0xffffffff, .flags = 0, .details = NULL, .actions = NULL, }; static const struct intr_info ma_perr_status2 = { .name = "MA_PARITY_ERROR_STATUS2", .cause_reg = A_MA_PARITY_ERROR_STATUS2, .enable_reg = A_MA_PARITY_ERROR_ENABLE2, .fatal = 0xffffffff, .flags = 0, .details = NULL, .actions = NULL, }; bool fatal; fatal = false; fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose); if (chip_id(adap) > CHELSIO_T4) fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose); return (fatal); } /* * SMB interrupt handler. */ static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details smb_intr_details[] = { { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" }, { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" }, { F_SLVFIFOPARINT, "SMB slave FIFO parity error" }, { 0 } }; static const struct intr_info smb_intr_info = { .name = "SMB_INT_CAUSE", .cause_reg = A_SMB_INT_CAUSE, .enable_reg = A_SMB_INT_ENABLE, .fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT, .flags = 0, .details = smb_intr_details, .actions = NULL, }; return (t4_handle_intr(adap, &smb_intr_info, 0, verbose)); } /* * NC-SI interrupt handler. */ static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details ncsi_intr_details[] = { { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" }, { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" }, { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" }, { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" }, { 0 } }; static const struct intr_info ncsi_intr_info = { .name = "NCSI_INT_CAUSE", .cause_reg = A_NCSI_INT_CAUSE, .enable_reg = A_NCSI_INT_ENABLE, .fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR | F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR, .flags = 0, .details = ncsi_intr_details, .actions = NULL, }; return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose)); } /* * MAC interrupt handler. 
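 *
 * The per-port MAC cause registers moved between chip generations, so
 * the intr_info is built at run time: T4 uses the XGMAC_PORT registers,
 * T5+ use MAC_PORT plus a PERR cause register, and T6 adds a second PERR
 * cause register for the 100G blocks.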
*/ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose) { static const struct intr_details mac_intr_details[] = { { F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" }, { F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" }, { 0 } }; char name[32]; struct intr_info ii; bool fatal = false; if (is_t4(adap)) { snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port); ii.name = &name[0]; ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN); ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR; ii.flags = 0; ii.details = mac_intr_details; ii.actions = NULL; } else { snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port); ii.name = &name[0]; ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN); ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR; ii.flags = 0; ii.details = mac_intr_details; ii.actions = NULL; } fatal |= t4_handle_intr(adap, &ii, 0, verbose); if (chip_id(adap) >= CHELSIO_T5) { snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port); ii.name = &name[0]; ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE); ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN); ii.fatal = 0; ii.flags = 0; ii.details = NULL; ii.actions = NULL; fatal |= t4_handle_intr(adap, &ii, 0, verbose); } if (chip_id(adap) >= CHELSIO_T6) { snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port); ii.name = &name[0]; ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G); ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G); ii.fatal = 0; ii.flags = 0; ii.details = NULL; ii.actions = NULL; fatal |= t4_handle_intr(adap, &ii, 0, verbose); } return (fatal); } static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details plpl_intr_details[] = { { F_FATALPERR, "Fatal parity error" }, { F_PERRVFID, "VFID_MAP parity error" }, { 0 } }; static const struct intr_info plpl_intr_info = { .name = "PL_PL_INT_CAUSE", .cause_reg = A_PL_PL_INT_CAUSE, .enable_reg = A_PL_PL_INT_ENABLE, .fatal = F_FATALPERR | F_PERRVFID, .flags = NONFATAL_IF_DISABLED, .details = plpl_intr_details, .actions = NULL, }; return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose)); } /** * t4_slow_intr_handler - control path interrupt handler * @adap: the adapter * @verbose: increased verbosity, for debug * * T4 interrupt handler for non-data global interrupt events, e.g., errors. * The designation 'slow' is because it involves register reads, while * data interrupts typically don't involve any MMIOs. 
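 *
 * The sequence below is: snapshot and clear PL_PERR_CAUSE, then let
 * t4_handle_intr() walk PL_INT_CAUSE and dispatch each set bit to the
 * per-module handler listed in pl_intr_action.  If any handler reports a
 * fatal condition the adapter is taken down via t4_fatal_err().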
*/ int t4_slow_intr_handler(struct adapter *adap, bool verbose) { static const struct intr_details pl_intr_details[] = { { F_MC1, "MC1" }, { F_UART, "UART" }, { F_ULP_TX, "ULP TX" }, { F_SGE, "SGE" }, { F_HMA, "HMA" }, { F_CPL_SWITCH, "CPL Switch" }, { F_ULP_RX, "ULP RX" }, { F_PM_RX, "PM RX" }, { F_PM_TX, "PM TX" }, { F_MA, "MA" }, { F_TP, "TP" }, { F_LE, "LE" }, { F_EDC1, "EDC1" }, { F_EDC0, "EDC0" }, { F_MC, "MC0" }, { F_PCIE, "PCIE" }, { F_PMU, "PMU" }, { F_MAC3, "MAC3" }, { F_MAC2, "MAC2" }, { F_MAC1, "MAC1" }, { F_MAC0, "MAC0" }, { F_SMB, "SMB" }, { F_SF, "SF" }, { F_PL, "PL" }, { F_NCSI, "NC-SI" }, { F_MPS, "MPS" }, { F_MI, "MI" }, { F_DBG, "DBG" }, { F_I2CM, "I2CM" }, { F_CIM, "CIM" }, { 0 } }; static const struct intr_info pl_perr_cause = { .name = "PL_PERR_CAUSE", .cause_reg = A_PL_PERR_CAUSE, .enable_reg = A_PL_PERR_ENABLE, .fatal = 0xffffffff, .flags = 0, .details = pl_intr_details, .actions = NULL, }; static const struct intr_action pl_intr_action[] = { { F_MC1, MEM_MC1, mem_intr_handler }, { F_ULP_TX, -1, ulptx_intr_handler }, { F_SGE, -1, sge_intr_handler }, { F_CPL_SWITCH, -1, cplsw_intr_handler }, { F_ULP_RX, -1, ulprx_intr_handler }, { F_PM_RX, -1, pmrx_intr_handler}, { F_PM_TX, -1, pmtx_intr_handler}, { F_MA, -1, ma_intr_handler }, { F_TP, -1, tp_intr_handler }, { F_LE, -1, le_intr_handler }, { F_EDC1, MEM_EDC1, mem_intr_handler }, { F_EDC0, MEM_EDC0, mem_intr_handler }, { F_MC0, MEM_MC0, mem_intr_handler }, { F_PCIE, -1, pcie_intr_handler }, { F_MAC3, 3, mac_intr_handler}, { F_MAC2, 2, mac_intr_handler}, { F_MAC1, 1, mac_intr_handler}, { F_MAC0, 0, mac_intr_handler}, { F_SMB, -1, smb_intr_handler}, { F_PL, -1, plpl_intr_handler }, { F_NCSI, -1, ncsi_intr_handler}, { F_MPS, -1, mps_intr_handler }, { F_CIM, -1, cim_intr_handler }, { 0 } }; static const struct intr_info pl_intr_info = { .name = "PL_INT_CAUSE", .cause_reg = A_PL_INT_CAUSE, .enable_reg = A_PL_INT_ENABLE, .fatal = 0, .flags = 0, .details = pl_intr_details, .actions = pl_intr_action, }; bool fatal; u32 perr; perr = t4_read_reg(adap, pl_perr_cause.cause_reg); if (verbose || perr != 0) { t4_show_intr_info(adap, &pl_perr_cause, perr); if (perr != 0) t4_write_reg(adap, pl_perr_cause.cause_reg, perr); if (verbose) perr |= t4_read_reg(adap, pl_intr_info.enable_reg); } fatal = t4_handle_intr(adap, &pl_intr_info, perr, verbose); if (fatal) t4_fatal_err(adap, false); return (0); } #define PF_INTR_MASK (F_PFSW | F_PFCIM) /** * t4_intr_enable - enable interrupts * @adapter: the adapter whose interrupts should be enabled * * Enable PF-specific interrupts for the calling function and the top-level * interrupt concentrator for global interrupts. Interrupts are already * enabled at each module, here we just enable the roots of the interrupt * hierarchies. * * Note: this function should be called only when the driver manages * non PF-specific interrupts from the various HW modules. Only one PCI * function at a time should be doing this. 
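 *
 * The final write below sets this PF's bit in A_PL_INT_MAP0, which is
 * what routes global interrupts to this function; t4_intr_disable()
 * clears exactly that bit again.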
*/ void t4_intr_enable(struct adapter *adap) { u32 val = 0; if (chip_id(adap) <= CHELSIO_T5) val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT; else val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN; val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 | F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR; t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val); t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK); t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0); t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf); } /** * t4_intr_disable - disable interrupts * @adap: the adapter whose interrupts should be disabled * * Disable interrupts. We only disable the top-level interrupt * concentrators. The caller must be a PCI function managing global * interrupts. */ void t4_intr_disable(struct adapter *adap) { t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0); t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0); } /** * t4_intr_clear - clear all interrupts * @adap: the adapter whose interrupts should be cleared * * Clears all interrupts. The caller must be a PCI function managing * global interrupts. */ void t4_intr_clear(struct adapter *adap) { static const u32 cause_reg[] = { A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE, MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), A_CPL_INTR_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 0), EDC_REG(A_EDC_INT_CAUSE, 1), A_LE_DB_INT_CAUSE, A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE, A_MPS_CLS_INT_CAUSE, A_MPS_RX_PERR_INT_CAUSE, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO, A_MPS_STAT_PERR_INT_CAUSE_SRAM, A_MPS_TRC_INT_CAUSE, A_MPS_TX_INT_CAUSE, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO, A_NCSI_INT_CAUSE, A_PCIE_INT_CAUSE, A_PCIE_NONFAT_ERR, A_PL_PL_INT_CAUSE, A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE, A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3, A_SGE_INT_CAUSE4, A_SMB_INT_CAUSE, A_TP_INT_CAUSE, A_ULP_RX_INT_CAUSE, A_ULP_RX_INT_CAUSE_2, A_ULP_TX_INT_CAUSE, A_ULP_TX_INT_CAUSE_2, MYPF_REG(A_PL_PF_INT_CAUSE), }; int i; const int nchan = adap->chip_params->nchan; for (i = 0; i < ARRAY_SIZE(cause_reg); i++) t4_write_reg(adap, cause_reg[i], 0xffffffff); if (is_t4(adap)) { t4_write_reg(adap, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 0xffffffff); t4_write_reg(adap, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 0xffffffff); t4_write_reg(adap, A_MC_INT_CAUSE, 0xffffffff); for (i = 0; i < nchan; i++) { t4_write_reg(adap, PORT_REG(i, A_XGMAC_PORT_INT_CAUSE), 0xffffffff); } } if (chip_id(adap) >= CHELSIO_T5) { t4_write_reg(adap, A_MA_PARITY_ERROR_STATUS2, 0xffffffff); t4_write_reg(adap, A_MPS_STAT_PERR_INT_CAUSE_SRAM1, 0xffffffff); t4_write_reg(adap, A_SGE_INT_CAUSE5, 0xffffffff); t4_write_reg(adap, A_MC_P_INT_CAUSE, 0xffffffff); if (is_t5(adap)) { t4_write_reg(adap, MC_REG(A_MC_P_INT_CAUSE, 1), 0xffffffff); } for (i = 0; i < nchan; i++) { t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_PERR_INT_CAUSE), 0xffffffff); if (chip_id(adap) > CHELSIO_T5) { t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_PERR_INT_CAUSE_100G), 0xffffffff); } t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_INT_CAUSE), 0xffffffff); } } if (chip_id(adap) >= CHELSIO_T6) { t4_write_reg(adap, A_SGE_INT_CAUSE6, 0xffffffff); } t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 
0 : 0xffffffff); t4_write_reg(adap, A_PL_PERR_CAUSE, 0xffffffff); t4_write_reg(adap, A_PL_INT_CAUSE, 0xffffffff); (void) t4_read_reg(adap, A_PL_INT_CAUSE); /* flush */ } /** * hash_mac_addr - return the hash value of a MAC address * @addr: the 48-bit Ethernet MAC address * * Hashes a MAC address according to the hash function used by HW inexact * (hash) address matching. */ static int hash_mac_addr(const u8 *addr) { u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; a ^= b; a ^= (a >> 12); a ^= (a >> 6); return a & 0x3f; } /** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter * @mbox: mbox to use for the FW command * @viid: virtual interface whose RSS subtable is to be written * @start: start entry in the table to write * @n: how many table entries to write * @rspq: values for the "response queue" (Ingress Queue) lookup table * @nrspq: number of values in @rspq * * Programs the selected part of the VI's RSS mapping table with the * provided values. If @nrspq < @n the supplied values are used repeatedly * until the full table range is populated. * * The caller must ensure the values in @rspq are in the range allowed for * @viid. */ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq) { int ret; const u16 *rsp = rspq; const u16 *rsp_end = rspq + nrspq; struct fw_rss_ind_tbl_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_RSS_IND_TBL_CMD_VIID(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); /* * Each firmware RSS command can accommodate up to 32 RSS Ingress * Queue Identifiers. These Ingress Queue IDs are packed three to * a 32-bit word as 10-bit values with the upper remaining 2 bits * reserved. */ while (n > 0) { int nq = min(n, 32); int nq_packed = 0; __be32 *qp = &cmd.iq0_to_iq2; /* * Set up the firmware RSS command header to send the next * "nq" Ingress Queue IDs to the firmware. */ cmd.niqid = cpu_to_be16(nq); cmd.startidx = cpu_to_be16(start); /* * Advance past the "nq" entries this command covers, ready for * the start of the next loop. */ start += nq; n -= nq; /* * While there are still Ingress Queue IDs to stuff into the * current firmware RSS command, retrieve them from the * Ingress Queue ID array and insert them into the command. */ while (nq > 0) { /* * Grab up to the next 3 Ingress Queue IDs (wrapping * around the Ingress Queue ID array if necessary) and * insert them into the firmware RSS command at the * current 3-tuple position within the command. */ u16 qbuf[3]; u16 *qbp = qbuf; int nqbuf = min(3, nq); nq -= nqbuf; qbuf[0] = qbuf[1] = qbuf[2] = 0; while (nqbuf && nq_packed < 32) { nqbuf--; nq_packed++; *qbp++ = *rsp++; if (rsp >= rsp_end) rsp = rspq; } *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); } /* * Send this portion of the RSS table update to the firmware; * bail out on any errors. */ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); if (ret) return ret; } return 0; } /** * t4_config_glbl_rss - configure the global RSS mode * @adapter: the adapter * @mbox: mbox to use for the FW command * @mode: global RSS mode * @flags: mode-specific flags * * Sets the global RSS mode.
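 *
 * As an illustration only (the exact flag macros are assumed to be the
 * F_FW_RSS_GLB_CONFIG_CMD_* definitions from t4fw_interface.h), a
 * basic-virtual configuration might be requested as:
 *
 *	t4_config_glbl_rss(sc, sc->mbox,
 *	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
 *	    F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
 *	    F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ);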
*/ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, unsigned int flags) { struct fw_rss_glb_config_cmd c; memset(&c, 0, sizeof(c)); c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { c.u.manual.mode_pkd = cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode)); } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { c.u.basicvirtual.mode_keymode = cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode)); c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags); } else return -EINVAL; return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); } /** * t4_config_vi_rss - configure per VI RSS settings * @adapter: the adapter * @mbox: mbox to use for the FW command * @viid: the VI id * @flags: RSS flags * @defq: id of the default RSS queue for the VI. * @skeyidx: RSS secret key table index for non-global mode * @skey: RSS vf_scramble key for VI. * * Configures VI-specific RSS properties. */ int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, unsigned int flags, unsigned int defq, unsigned int skeyidx, unsigned int skey) { struct fw_rss_vi_config_cmd c; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags | V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32( V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx)); c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey); return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); } /* Read an RSS table row */ static int rd_rss_row(struct adapter *adap, int row, u32 *val) { t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row); return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1, 5, 0, val); } /** * t4_read_rss - read the contents of the RSS mapping table * @adapter: the adapter * @map: holds the contents of the RSS mapping table * * Reads the contents of the RSS hash->queue mapping table. */ int t4_read_rss(struct adapter *adapter, u16 *map) { u32 val; int i, ret; for (i = 0; i < RSS_NENTRIES / 2; ++i) { ret = rd_rss_row(adapter, i, &val); if (ret) return ret; *map++ = G_LKPTBLQUEUE0(val); *map++ = G_LKPTBLQUEUE1(val); } return 0; } /** * t4_tp_fw_ldst_rw - Access TP indirect register through LDST * @adap: the adapter * @cmd: TP fw ldst address space type * @vals: where the indirect register values are stored/written * @nregs: how many indirect registers to read/write * @start_index: index of first indirect register to read/write * @rw: Read (1) or Write (0) * @sleep_ok: if true we may sleep while awaiting command completion * * Access TP indirect registers through LDST **/ static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals, unsigned int nregs, unsigned int start_index, unsigned int rw, bool sleep_ok) { int ret = 0; unsigned int i; struct fw_ldst_cmd c; for (i = 0; i < nregs; i++) { memset(&c, 0, sizeof(c)); c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | (rw ? F_FW_CMD_READ : F_FW_CMD_WRITE) | V_FW_LDST_CMD_ADDRSPACE(cmd)); c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c.u.addrval.addr = cpu_to_be32(start_index + i); c.u.addrval.val = rw ?
0 : cpu_to_be32(vals[i]); ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); if (ret) return ret; if (rw) vals[i] = be32_to_cpu(c.u.addrval.val); } return 0; } /** * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor * @adap: the adapter * @reg_addr: Address Register * @reg_data: Data register * @buff: where the indirect register values are stored/written * @nregs: how many indirect registers to read/write * @start_index: index of first indirect register to read/write * @rw: READ(1) or WRITE(0) * @sleep_ok: if true we may sleep while awaiting command completion * * Read/Write TP indirect registers through LDST if possible. * Else, use backdoor access **/ static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data, u32 *buff, u32 nregs, u32 start_index, int rw, bool sleep_ok) { int rc = -EINVAL; int cmd; switch (reg_addr) { case A_TP_PIO_ADDR: cmd = FW_LDST_ADDRSPC_TP_PIO; break; case A_TP_TM_PIO_ADDR: cmd = FW_LDST_ADDRSPC_TP_TM_PIO; break; case A_TP_MIB_INDEX: cmd = FW_LDST_ADDRSPC_TP_MIB; break; default: goto indirect_access; } if (t4_use_ldst(adap)) rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw, sleep_ok); indirect_access: if (rc) { if (rw) t4_read_indirect(adap, reg_addr, reg_data, buff, nregs, start_index); else t4_write_indirect(adap, reg_addr, reg_data, buff, nregs, start_index); } } /** * t4_tp_pio_read - Read TP PIO registers * @adap: the adapter * @buff: where the indirect register values are written * @nregs: how many indirect registers to read * @start_index: index of first indirect register to read * @sleep_ok: if true we may sleep while awaiting command completion * * Read TP PIO Registers **/ void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok) { t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs, start_index, 1, sleep_ok); } /** * t4_tp_pio_write - Write TP PIO registers * @adap: the adapter * @buff: where the indirect register values are stored * @nregs: how many indirect registers to write * @start_index: index of first indirect register to write * @sleep_ok: if true we may sleep while awaiting command completion * * Write TP PIO Registers **/ void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs, u32 start_index, bool sleep_ok) { t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok); } /** * t4_tp_tm_pio_read - Read TP TM PIO registers * @adap: the adapter * @buff: where the indirect register values are written * @nregs: how many indirect registers to read * @start_index: index of first indirect register to read * @sleep_ok: if true we may sleep while awaiting command completion * * Read TP TM PIO Registers **/ void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok) { t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff, nregs, start_index, 1, sleep_ok); } /** * t4_tp_mib_read - Read TP MIB registers * @adap: the adapter * @buff: where the indirect register values are written * @nregs: how many indirect registers to read * @start_index: index of first indirect register to read * @sleep_ok: if true we may sleep while awaiting command completion * * Read TP MIB Registers **/ void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok) { t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs, start_index, 1, sleep_ok); } /** * t4_read_rss_key - read the global RSS 
key * @adap: the adapter * @key: 10-entry array holding the 320-bit RSS key * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the global 320-bit RSS key. */ void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok) { t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok); } /** * t4_write_rss_key - program one of the RSS keys * @adap: the adapter * @key: 10-entry array holding the 320-bit RSS key * @idx: which RSS key to write * @sleep_ok: if true we may sleep while awaiting command completion * * Writes one of the RSS keys with the given 320-bit value. If @idx is * 0..15 the corresponding entry in the RSS key table is written, * otherwise the global RSS key is written. */ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx, bool sleep_ok) { u8 rss_key_addr_cnt = 16; u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT); /* * T6 and later: for KeyMode 3 (per-vf and per-vf scramble), * allows access to key addresses 16-63 by using KeyWrAddrX * as index[5:4](upper 2) into key table */ if ((chip_id(adap) > CHELSIO_T5) && (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3)) rss_key_addr_cnt = 32; t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok); if (idx >= 0 && idx < rss_key_addr_cnt) { if (rss_key_addr_cnt > 16) t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, vrt | V_KEYWRADDRX(idx >> 4) | V_T6_VFWRADDR(idx) | F_KEYWREN); else t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, vrt| V_KEYWRADDR(idx) | F_KEYWREN); } } /** * t4_read_rss_pf_config - read PF RSS Configuration Table * @adapter: the adapter * @index: the entry in the PF RSS table to read * @valp: where to store the returned value * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the PF RSS Configuration Table at the specified index and returns * the value found there. */ void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp, bool sleep_ok) { t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok); } /** * t4_write_rss_pf_config - write PF RSS Configuration Table * @adapter: the adapter * @index: the entry in the VF RSS table to read * @val: the value to store * @sleep_ok: if true we may sleep while awaiting command completion * * Writes the PF RSS Configuration Table at the specified index with the * specified value. */ void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val, bool sleep_ok) { t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok); } /** * t4_read_rss_vf_config - read VF RSS Configuration Table * @adapter: the adapter * @index: the entry in the VF RSS table to read * @vfl: where to store the returned VFL * @vfh: where to store the returned VFH * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the VF RSS Configuration Table at the specified index and returns * the (VFL, VFH) values found there. */ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, u32 *vfl, u32 *vfh, bool sleep_ok) { u32 vrt, mask, data; if (chip_id(adapter) <= CHELSIO_T5) { mask = V_VFWRADDR(M_VFWRADDR); data = V_VFWRADDR(index); } else { mask = V_T6_VFWRADDR(M_T6_VFWRADDR); data = V_T6_VFWRADDR(index); } /* * Request that the index'th VF Table values be read into VFL/VFH. */ vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask); vrt |= data | F_VFRDEN; t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); /* * Grab the VFL/VFH values ... 
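 * (the TP_RSS_CONFIG_VRT write above primed the read request; the two
 * PIO reads below only fetch the latched results)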
*/ t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok); t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok); } /** * t4_write_rss_vf_config - write VF RSS Configuration Table * * @adapter: the adapter * @index: the entry in the VF RSS table to write * @vfl: the VFL to store * @vfh: the VFH to store * @sleep_ok: if true we may sleep while awaiting command completion * * Writes the VF RSS Configuration Table at the specified index with the * specified (VFL, VFH) values. */ void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, u32 vfl, u32 vfh, bool sleep_ok) { u32 vrt, mask, data; if (chip_id(adapter) <= CHELSIO_T5) { mask = V_VFWRADDR(M_VFWRADDR); data = V_VFWRADDR(index); } else { mask = V_T6_VFWRADDR(M_T6_VFWRADDR); data = V_T6_VFWRADDR(index); } /* * Load up VFL/VFH with the values to be written ... */ t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok); t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok); /* * Write the VFL/VFH into the VF Table at index'th location. */ vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask); vrt |= data | F_VFWREN; t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); } /** * t4_read_rss_pf_map - read PF RSS Map * @adapter: the adapter * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the PF RSS Map register and returns its value. */ u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok) { u32 pfmap; t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); return pfmap; } /** * t4_write_rss_pf_map - write PF RSS Map * @adapter: the adapter * @pfmap: PF RSS Map value * @sleep_ok: if true we may sleep while awaiting command completion * * Writes the specified value to the PF RSS Map register. */ void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok) { t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); } /** * t4_read_rss_pf_mask - read PF RSS Mask * @adapter: the adapter * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the PF RSS Mask register and returns its value. */ u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok) { u32 pfmask; t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok); return pfmask; } /** * t4_write_rss_pf_mask - write PF RSS Mask * @adapter: the adapter * @pfmask: PF RSS Mask value * @sleep_ok: if true we may sleep while awaiting command completion * * Writes the specified value to the PF RSS Mask register. */ void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok) { t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok); } /** * t4_tp_get_tcp_stats - read TP's TCP MIB counters * @adap: the adapter * @v4: holds the TCP/IP counter values * @v6: holds the TCP/IPv6 counter values * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
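 *
 * Each 64-bit counter is kept as a HI/LO pair of 32-bit MIB words; the
 * STAT64() helper below reassembles them, e.g. tcp_in_segs ends up as
 * ((u64)IN_SEG_HI << 32) | IN_SEG_LO.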
*/ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6, bool sleep_ok) { u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1]; #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST) #define STAT(x) val[STAT_IDX(x)] #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) if (v4) { t4_tp_mib_read(adap, val, ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST, sleep_ok); v4->tcp_out_rsts = STAT(OUT_RST); v4->tcp_in_segs = STAT64(IN_SEG); v4->tcp_out_segs = STAT64(OUT_SEG); v4->tcp_retrans_segs = STAT64(RXT_SEG); } if (v6) { t4_tp_mib_read(adap, val, ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST, sleep_ok); v6->tcp_out_rsts = STAT(OUT_RST); v6->tcp_in_segs = STAT64(IN_SEG); v6->tcp_out_segs = STAT64(OUT_SEG); v6->tcp_retrans_segs = STAT64(RXT_SEG); } #undef STAT64 #undef STAT #undef STAT_IDX } /** * t4_tp_get_err_stats - read TP's error MIB counters * @adap: the adapter * @st: holds the counter values * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's error counters. */ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st, bool sleep_ok) { int nchan = adap->chip_params->nchan; t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0, sleep_ok); t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0, sleep_ok); t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0, sleep_ok); t4_tp_mib_read(adap, st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0, sleep_ok); t4_tp_mib_read(adap, st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0, sleep_ok); t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0, sleep_ok); t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0, sleep_ok); t4_tp_mib_read(adap, st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok); t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP, sleep_ok); } /** * t4_tp_get_proxy_stats - read TP's proxy MIB counters * @adap: the adapter * @st: holds the counter values * * Returns the values of TP's proxy counters. */ void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st, bool sleep_ok) { int nchan = adap->chip_params->nchan; t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok); } /** * t4_tp_get_cpl_stats - read TP's CPL MIB counters * @adap: the adapter * @st: holds the counter values * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's CPL counters. */ void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st, bool sleep_ok) { int nchan = adap->chip_params->nchan; t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok); t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok); } /** * t4_tp_get_rdma_stats - read TP's RDMA MIB counters * @adap: the adapter * @st: holds the counter values * * Returns the values of TP's RDMA counters. */ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st, bool sleep_ok) { t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT, sleep_ok); } /** * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port * @adap: the adapter * @idx: the port index * @st: holds the counter values * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's FCoE counters for the selected port. 
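 *
 * The DDP byte count is likewise a HI/LO pair fetched in one read:
 * octets_ddp = ((u64)val[0] << 32) | val[1].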
*/ void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, struct tp_fcoe_stats *st, bool sleep_ok) { u32 val[2]; t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx, sleep_ok); t4_tp_mib_read(adap, &st->frames_drop, 1, A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok); t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx, sleep_ok); st->octets_ddp = ((u64)val[0] << 32) | val[1]; } /** * t4_get_usm_stats - read TP's non-TCP DDP MIB counters * @adap: the adapter * @st: holds the counter values * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's counters for non-TCP directly-placed packets. */ void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st, bool sleep_ok) { u32 val[4]; t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok); st->frames = val[0]; st->drops = val[1]; st->octets = ((u64)val[2] << 32) | val[3]; } /** * t4_read_mtu_tbl - returns the values in the HW path MTU table * @adap: the adapter * @mtus: where to store the MTU values * @mtu_log: where to store the MTU base-2 log (may be %NULL) * * Reads the HW path MTU table. */ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) { u32 v; int i; for (i = 0; i < NMTUS; ++i) { t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(0xff) | V_MTUVALUE(i)); v = t4_read_reg(adap, A_TP_MTU_TABLE); mtus[i] = G_MTUVALUE(v); if (mtu_log) mtu_log[i] = G_MTUWIDTH(v); } } /** * t4_read_cong_tbl - reads the congestion control table * @adap: the adapter * @incr: where to store the alpha values * * Reads the additive increments programmed into the HW congestion * control table. */ void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) { unsigned int mtu, w; for (mtu = 0; mtu < NMTUS; ++mtu) for (w = 0; w < NCCTRL_WIN; ++w) { t4_write_reg(adap, A_TP_CCTRL_TABLE, V_ROWINDEX(0xffff) | (mtu << 5) | w); incr[mtu][w] = (u16)t4_read_reg(adap, A_TP_CCTRL_TABLE) & 0x1fff; } } /** * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register * @adap: the adapter * @addr: the indirect TP register address * @mask: specifies the field within the register to modify * @val: new value for the field * * Sets a field of an indirect TP register to the given value. */ void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val) { t4_write_reg(adap, A_TP_PIO_ADDR, addr); val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; t4_write_reg(adap, A_TP_PIO_DATA, val); } /** * init_cong_ctrl - initialize congestion control parameters * @a: the alpha values for congestion control * @b: the beta values for congestion control * * Initialize the congestion control parameters. 
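 *
 * The alpha values ramp roughly exponentially from 1 to 500 and the beta
 * values from 0 to 7; t4_load_mtus() combines them with the MTU table to
 * compute the per-window additive increments.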
*/ static void init_cong_ctrl(unsigned short *a, unsigned short *b) { a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; a[9] = 2; a[10] = 3; a[11] = 4; a[12] = 5; a[13] = 6; a[14] = 7; a[15] = 8; a[16] = 9; a[17] = 10; a[18] = 14; a[19] = 17; a[20] = 21; a[21] = 25; a[22] = 30; a[23] = 35; a[24] = 45; a[25] = 60; a[26] = 80; a[27] = 100; a[28] = 200; a[29] = 300; a[30] = 400; a[31] = 500; b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; b[9] = b[10] = 1; b[11] = b[12] = 2; b[13] = b[14] = b[15] = b[16] = 3; b[17] = b[18] = b[19] = b[20] = b[21] = 4; b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; b[28] = b[29] = 6; b[30] = b[31] = 7; } /* The minimum additive increment value for the congestion control table */ #define CC_MIN_INCR 2U /** * t4_load_mtus - write the MTU and congestion control HW tables * @adap: the adapter * @mtus: the values for the MTU table * @alpha: the values for the congestion control alpha parameter * @beta: the values for the congestion control beta parameter * * Write the HW MTU table with the supplied MTUs and the high-speed * congestion control table with the supplied alpha, beta, and MTUs. * We write the two tables together because the additive increments * depend on the MTUs. */ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned short *beta) { static const unsigned int avg_pkts[NCCTRL_WIN] = { 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 28672, 40960, 57344, 81920, 114688, 163840, 229376 }; unsigned int i, w; for (i = 0; i < NMTUS; ++i) { unsigned int mtu = mtus[i]; unsigned int log2 = fls(mtu); if (!(mtu & ((1 << log2) >> 2))) /* round */ log2--; t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); for (w = 0; w < NCCTRL_WIN; ++w) { unsigned int inc; inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], CC_MIN_INCR); t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | (w << 16) | (beta[w] << 13) | inc); } } } /** * t4_set_pace_tbl - set the pace table * @adap: the adapter * @pace_vals: the pace values in microseconds * @start: index of the first entry in the HW pace table to set * @n: how many entries to set * * Sets (a subset of the) HW pace table. */ int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals, unsigned int start, unsigned int n) { unsigned int vals[NTX_SCHED], i; unsigned int tick_ns = dack_ticks_to_usec(adap, 1000); if (n > NTX_SCHED) return -ERANGE; /* convert values from us to dack ticks, rounding to closest value */ for (i = 0; i < n; i++, pace_vals++) { vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns; if (vals[i] > 0x7ff) return -ERANGE; if (*pace_vals && vals[i] == 0) return -ERANGE; } for (i = 0; i < n; i++, start++) t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]); return 0; } /** * t4_set_sched_bps - set the bit rate for a HW traffic scheduler * @adap: the adapter * @kbps: target rate in Kbps * @sched: the scheduler index * * Configure a Tx HW scheduler for the target rate. */ int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps) { unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; unsigned int clk = adap->params.vpd.cclk * 1000; unsigned int selected_cpt = 0, selected_bpt = 0; if (kbps > 0) { kbps *= 125; /* -> bytes */ for (cpt = 1; cpt <= 255; cpt++) { tps = clk / cpt; bpt = (kbps + tps / 2) / tps; if (bpt > 0 && bpt <= 255) { v = bpt * tps; delta = v >= kbps ? 
v - kbps : kbps - v; if (delta < mindelta) { mindelta = delta; selected_cpt = cpt; selected_bpt = bpt; } } else if (selected_cpt) break; } if (!selected_cpt) return -EINVAL; } t4_write_reg(adap, A_TP_TM_PIO_ADDR, A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); v = t4_read_reg(adap, A_TP_TM_PIO_DATA); if (sched & 1) v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); else v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); t4_write_reg(adap, A_TP_TM_PIO_DATA, v); return 0; } /** * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler * @adap: the adapter * @sched: the scheduler index * @ipg: the interpacket delay in tenths of nanoseconds * * Set the interpacket delay for a HW packet rate scheduler. */ int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg) { unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; /* convert ipg to nearest number of core clocks */ ipg *= core_ticks_per_usec(adap); ipg = (ipg + 5000) / 10000; if (ipg > M_TXTIMERSEPQ0) return -EINVAL; t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); v = t4_read_reg(adap, A_TP_TM_PIO_DATA); if (sched & 1) v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg); else v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg); t4_write_reg(adap, A_TP_TM_PIO_DATA, v); t4_read_reg(adap, A_TP_TM_PIO_DATA); return 0; } /* * Calculates a rate in bytes/s given the number of 256-byte units per 4K core * clocks. The formula is * * bytes/s = bytes256 * 256 * ClkFreq / 4096 * * which is equivalent to * * bytes/s = 62.5 * bytes256 * ClkFreq_ms */ static u64 chan_rate(struct adapter *adap, unsigned int bytes256) { u64 v = (u64)bytes256 * adap->params.vpd.cclk; return v * 62 + v / 2; } /** * t4_get_chan_txrate - get the current per channel Tx rates * @adap: the adapter * @nic_rate: rates for NIC traffic * @ofld_rate: rates for offloaded traffic * * Return the current Tx rates in bytes/s for NIC and offloaded traffic * for each channel. */ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) { u32 v; v = t4_read_reg(adap, A_TP_TX_TRATE); nic_rate[0] = chan_rate(adap, G_TNLRATE0(v)); nic_rate[1] = chan_rate(adap, G_TNLRATE1(v)); if (adap->chip_params->nchan > 2) { nic_rate[2] = chan_rate(adap, G_TNLRATE2(v)); nic_rate[3] = chan_rate(adap, G_TNLRATE3(v)); } v = t4_read_reg(adap, A_TP_TX_ORATE); ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v)); ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v)); if (adap->chip_params->nchan > 2) { ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v)); ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v)); } } /** * t4_set_trace_filter - configure one of the tracing filters * @adap: the adapter * @tp: the desired trace filter parameters * @idx: which filter to configure * @enable: whether to enable or disable the filter * * Configures one of the tracing filters available in HW. If @tp is %NULL * it indicates that the filter is already written in the register and it * just needs to be enabled or disabled. */ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx, int enable) { int i, ofst = idx * 4; u32 data_reg, mask_reg, cfg; u32 multitrc = F_TRCMULTIFILTER; u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; if (idx < 0 || idx >= NTRACE) return -EINVAL; if (tp == NULL || !enable) { t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, enable ? en : 0); return 0; } /* * TODO - After T4 data book is updated, specify the exact * section below. 
* * See T4 data book - MPS section for a complete description * of the below if..else handling of A_MPS_TRC_CFG register * value. */ cfg = t4_read_reg(adap, A_MPS_TRC_CFG); if (cfg & F_TRCMULTIFILTER) { /* * If multiple tracers are enabled, then maximum * capture size is 2.5KB (FIFO size of a single channel) * minus 2 flits for CPL_TRACE_PKT header. */ if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) return -EINVAL; } else { /* * If multiple tracers are disabled, to avoid deadlocks * maximum packet capture size of 9600 bytes is recommended. * Also in this mode, only trace0 can be enabled and running. */ multitrc = 0; if (tp->snap_len > 9600 || idx) return -EINVAL; } if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 || tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE) return -EINVAL; /* stop the tracer we'll be changing */ t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0); idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); data_reg = A_MPS_TRC_FILTER0_MATCH + idx; mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { t4_write_reg(adap, data_reg, tp->data[i]); t4_write_reg(adap, mask_reg, ~tp->mask[i]); } t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, V_TFCAPTUREMAX(tp->snap_len) | V_TFMINPKTSIZE(tp->min_len)); t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en | (is_t4(adap) ? V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); return 0; } /** * t4_get_trace_filter - query one of the tracing filters * @adap: the adapter * @tp: the current trace filter parameters * @idx: which trace filter to query * @enabled: non-zero if the filter is enabled * * Returns the current settings of one of the HW tracing filters. */ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, int *enabled) { u32 ctla, ctlb; int i, ofst = idx * 4; u32 data_reg, mask_reg; ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst); ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst); if (is_t4(adap)) { *enabled = !!(ctla & F_TFEN); tp->port = G_TFPORT(ctla); tp->invert = !!(ctla & F_TFINVERTMATCH); } else { *enabled = !!(ctla & F_T5_TFEN); tp->port = G_T5_TFPORT(ctla); tp->invert = !!(ctla & F_T5_TFINVERTMATCH); } tp->snap_len = G_TFCAPTUREMAX(ctlb); tp->min_len = G_TFMINPKTSIZE(ctlb); tp->skip_ofst = G_TFOFFSET(ctla); tp->skip_len = G_TFLENGTH(ctla); ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx; data_reg = A_MPS_TRC_FILTER0_MATCH + ofst; mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst; for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { tp->mask[i] = ~t4_read_reg(adap, mask_reg); tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; } } /** * t4_pmtx_get_stats - returns the HW stats from PMTX * @adap: the adapter * @cnt: where to store the count statistics * @cycles: where to store the cycle statistics * * Returns performance statistics from PMTX. 
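 *
 * Usage sketch (editorial; sizes and local names are illustrative): the
 * caller supplies arrays with at least adap->chip_params->pm_stats_cnt
 * entries, e.g.
 *
 *	u32 cnt[8];
 *	u64 cyc[8];
 *
 *	t4_pmtx_get_stats(sc, cnt, cyc);
 *
 * Only the first pm_stats_cnt entries of each array are written.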
*/ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) { int i; u32 data[2]; for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) { t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1); cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT); if (is_t4(adap)) cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB); else { t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, data, 2, A_PM_TX_DBG_STAT_MSB); cycles[i] = (((u64)data[0] << 32) | data[1]); } } } /** * t4_pmrx_get_stats - returns the HW stats from PMRX * @adap: the adapter * @cnt: where to store the count statistics * @cycles: where to store the cycle statistics * * Returns performance statistics from PMRX. */ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) { int i; u32 data[2]; for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) { t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1); cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT); if (is_t4(adap)) { cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB); } else { t4_read_indirect(adap, A_PM_RX_DBG_CTRL, A_PM_RX_DBG_DATA, data, 2, A_PM_RX_DBG_STAT_MSB); cycles[i] = (((u64)data[0] << 32) | data[1]); } } } /** * t4_get_mps_bg_map - return the buffer groups associated with a port * @adap: the adapter * @idx: the port index * * Returns a bitmap indicating which MPS buffer groups are associated * with the given port. Bit i is set if buffer group i is used by the * port. */ static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx) { u32 n; if (adap->params.mps_bg_map) return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff); n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL)); if (n == 0) return idx == 0 ? 0xf : 0; if (n == 1 && chip_id(adap) <= CHELSIO_T5) return idx < 2 ? (3 << (2 * idx)) : 0; return 1 << idx; } /* * TP RX e-channels associated with the port. */ static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx) { u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL)); if (n == 0) return idx == 0 ? 0xf : 0; if (n == 1 && chip_id(adap) <= CHELSIO_T5) return idx < 2 ? (3 << (2 * idx)) : 0; return 1 << idx; } /** * t4_get_port_type_description - return Port Type string description * @port_type: firmware Port Type enumeration */ const char *t4_get_port_type_description(enum fw_port_type port_type) { static const char *const port_type_description[] = { "Fiber_XFI", "Fiber_XAUI", "BT_SGMII", "BT_XFI", "BT_XAUI", "KX4", "CX4", "KX", "KR", "SFP", "BP_AP", "BP4_AP", "QSFP_10G", "QSA", "QSFP", "BP40_BA", "KR4_100G", "CR4_QSFP", "CR_QSFP", "CR2_QSFP", "SFP28", "KR_SFP28", }; if (port_type < ARRAY_SIZE(port_type_description)) return port_type_description[port_type]; return "UNKNOWN"; } /** * t4_get_port_stats_offset - collect port stats relative to a previous * snapshot * @adap: The adapter * @idx: The port * @stats: Current stats to fill * @offset: Previous stats snapshot */ void t4_get_port_stats_offset(struct adapter *adap, int idx, struct port_stats *stats, struct port_stats *offset) { u64 *s, *o; int i; t4_get_port_stats(adap, idx, stats); for (i = 0, s = (u64 *)stats, o = (u64 *)offset ; i < (sizeof(struct port_stats)/sizeof(u64)) ; i++, s++, o++) *s -= *o; } /** * t4_get_port_stats - collect port statistics * @adap: the adapter * @idx: the port index * @p: the stats structure to fill * * Collect statistics related to the given port from HW. 
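 *
 * For interval rates this is usually paired with a saved snapshot via
 * t4_get_port_stats_offset() above; a sketch (local names assumed):
 *
 *	struct port_stats now, prev;
 *
 *	t4_get_port_stats_offset(sc, pi->port_id, &now, &prev);
 *
 * after which every u64 field of now is the delta relative to prev,
 * because t4_get_port_stats_offset() subtracts the snapshot field by
 * field from a fresh t4_get_port_stats() reading.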
*/ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) { u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map; u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL); #define GET_STAT(name) \ t4_read_reg64(adap, \ (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \ T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))) #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) p->tx_pause = GET_STAT(TX_PORT_PAUSE); p->tx_octets = GET_STAT(TX_PORT_BYTES); p->tx_frames = GET_STAT(TX_PORT_FRAMES); p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); p->tx_error_frames = GET_STAT(TX_PORT_ERROR); p->tx_frames_64 = GET_STAT(TX_PORT_64B); p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); p->tx_drop = GET_STAT(TX_PORT_DROP); p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); if (chip_id(adap) >= CHELSIO_T5) { if (stat_ctl & F_COUNTPAUSESTATTX) { p->tx_frames -= p->tx_pause; p->tx_octets -= p->tx_pause * 64; } if (stat_ctl & F_COUNTPAUSEMCTX) p->tx_mcast_frames -= p->tx_pause; } p->rx_pause = GET_STAT(RX_PORT_PAUSE); p->rx_octets = GET_STAT(RX_PORT_BYTES); p->rx_frames = GET_STAT(RX_PORT_FRAMES); p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); p->rx_runt = GET_STAT(RX_PORT_LESS_64B); p->rx_frames_64 = GET_STAT(RX_PORT_64B); p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); if (chip_id(adap) >= CHELSIO_T5) { if (stat_ctl & F_COUNTPAUSESTATRX) { p->rx_frames -= p->rx_pause; p->rx_octets -= p->rx_pause * 64; } if (stat_ctl & F_COUNTPAUSEMCRX) p->rx_mcast_frames -= p->rx_pause; } p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; p->rx_trunc3 = (bgmap & 8) ? 
GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; #undef GET_STAT #undef GET_STAT_COM } /** * t4_get_lb_stats - collect loopback port statistics * @adap: the adapter * @idx: the loopback port index * @p: the stats structure to fill * * Return HW statistics for the given loopback port. */ void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) { u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map; #define GET_STAT(name) \ t4_read_reg64(adap, \ (is_t4(adap) ? \ PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \ T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))) #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) p->octets = GET_STAT(BYTES); p->frames = GET_STAT(FRAMES); p->bcast_frames = GET_STAT(BCAST); p->mcast_frames = GET_STAT(MCAST); p->ucast_frames = GET_STAT(UCAST); p->error_frames = GET_STAT(ERROR); p->frames_64 = GET_STAT(64B); p->frames_65_127 = GET_STAT(65B_127B); p->frames_128_255 = GET_STAT(128B_255B); p->frames_256_511 = GET_STAT(256B_511B); p->frames_512_1023 = GET_STAT(512B_1023B); p->frames_1024_1518 = GET_STAT(1024B_1518B); p->frames_1519_max = GET_STAT(1519B_MAX); p->drop = GET_STAT(DROP_FRAMES); p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; #undef GET_STAT #undef GET_STAT_COM } /** * t4_wol_magic_enable - enable/disable magic packet WoL * @adap: the adapter * @port: the physical port index * @addr: MAC address expected in magic packets, %NULL to disable * * Enables/disables magic packet wake-on-LAN for the selected port. */ void t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr) { u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; if (is_t4(adap)) { mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO); mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI); port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); } else { mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO); mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI); port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); } if (addr) { t4_write_reg(adap, mag_id_reg_l, (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]); t4_write_reg(adap, mag_id_reg_h, (addr[0] << 8) | addr[1]); } t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN, V_MAGICEN(addr != NULL)); } /** * t4_wol_pat_enable - enable/disable pattern-based WoL * @adap: the adapter * @port: the physical port index * @map: bitmap of which HW pattern filters to set * @mask0: byte mask for bytes 0-63 of a packet * @mask1: byte mask for bytes 64-127 of a packet * @crc: Ethernet CRC for selected bytes * @enable: enable/disable switch * * Sets the pattern filters indicated in @map to mask out the bytes * specified in @mask0/@mask1 in received packets and compare the CRC of * the resulting packet against @crc. If @enable is %true pattern-based * WoL is enabled, otherwise disabled. 
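 *
 * A hypothetical call that programs pattern filter 0 using the first
 * twelve byte-mask bits (all values illustrative):
 *
 *	ret = t4_wol_pat_enable(adap, port, 1, 0xfffULL, 0, crc, true);
 *
 * Here map = 1 selects filter 0, mask0 = 0xfff covers bytes 0-11, and
 * crc is the Ethernet CRC to compare against the masked packet, as
 * described above.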
*/ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, u64 mask0, u64 mask1, unsigned int crc, bool enable) { int i; u32 port_cfg_reg; if (is_t4(adap)) port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); else port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); if (!enable) { t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0); return 0; } if (map > 0xff) return -EINVAL; #define EPIO_REG(name) \ (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \ T5_PORT_REG(port, A_MAC_PORT_EPIO_##name)) t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); t4_write_reg(adap, EPIO_REG(DATA2), mask1); t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); for (i = 0; i < NWOL_PAT; i++, map >>= 1) { if (!(map & 1)) continue; /* write byte masks */ t4_write_reg(adap, EPIO_REG(DATA0), mask0); t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR); t4_read_reg(adap, EPIO_REG(OP)); /* flush */ if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) return -ETIMEDOUT; /* write CRC */ t4_write_reg(adap, EPIO_REG(DATA0), crc); t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR); t4_read_reg(adap, EPIO_REG(OP)); /* flush */ if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) return -ETIMEDOUT; } #undef EPIO_REG t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN); return 0; } /* t4_mk_filtdelwr - create a delete filter WR * @ftid: the filter ID * @wr: the filter work request to populate * @qid: ingress queue to receive the delete notification * * Creates a filter work request to delete the supplied filter. If @qid is * negative the delete notification is suppressed. */ void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) { memset(wr, 0, sizeof(*wr)); wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR)); wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16)); wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) | V_FW_FILTER_WR_NOREPLY(qid < 0)); wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER); if (qid >= 0) wr->rx_chan_rx_rpl_iq = cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid)); } #define INIT_CMD(var, cmd, rd_wr) do { \ (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \ F_FW_CMD_REQUEST | \ F_FW_CMD_##rd_wr); \ (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \ } while (0) int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val) { u32 ldst_addrspace; struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE); c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | ldst_addrspace); c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c.u.addrval.addr = cpu_to_be32(addr); c.u.addrval.val = cpu_to_be32(val); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_mdio_rd - read a PHY register through MDIO * @adap: the adapter * @mbox: mailbox to use for the FW command * @phy_addr: the PHY address * @mmd: the PHY MMD to access (0 for clause 22 PHYs) * @reg: the register to read * @valp: where to store the value * * Issues a FW command through the given mailbox to read a PHY register. 
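 *
 * For example, a clause-45 read of PMA/PMD device ID register 2 might
 * look like this (the PHY address is illustrative):
 *
 *	unsigned int id;
 *	int ret;
 *
 *	ret = t4_mdio_rd(sc, sc->mbox, 0, 1, 2, &id);
 *
 * On success id holds the 16-bit register value; pass @mmd = 0 for a
 * clause-22 PHY.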
*/ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, unsigned int *valp) { int ret; u32 ldst_addrspace; struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO); c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | ldst_addrspace); c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) | V_FW_LDST_CMD_MMD(mmd)); c.u.mdio.raddr = cpu_to_be16(reg); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) *valp = be16_to_cpu(c.u.mdio.rval); return ret; } /** * t4_mdio_wr - write a PHY register through MDIO * @adap: the adapter * @mbox: mailbox to use for the FW command * @phy_addr: the PHY address * @mmd: the PHY MMD to access (0 for clause 22 PHYs) * @reg: the register to write * @valp: value to write * * Issues a FW command through the given mailbox to write a PHY register. */ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, unsigned int val) { u32 ldst_addrspace; struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO); c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | ldst_addrspace); c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) | V_FW_LDST_CMD_MMD(mmd)); c.u.mdio.raddr = cpu_to_be16(reg); c.u.mdio.rval = cpu_to_be16(val); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * * t4_sge_decode_idma_state - decode the idma state * @adap: the adapter * @state: the state idma is stuck in */ void t4_sge_decode_idma_state(struct adapter *adapter, int state) { static const char * const t4_decode[] = { "IDMA_IDLE", "IDMA_PUSH_MORE_CPL_FIFO", "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", "Not used", "IDMA_PHYSADDR_SEND_PCIEHDR", "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", "IDMA_PHYSADDR_SEND_PAYLOAD", "IDMA_SEND_FIFO_TO_IMSG", "IDMA_FL_REQ_DATA_FL_PREP", "IDMA_FL_REQ_DATA_FL", "IDMA_FL_DROP", "IDMA_FL_H_REQ_HEADER_FL", "IDMA_FL_H_SEND_PCIEHDR", "IDMA_FL_H_PUSH_CPL_FIFO", "IDMA_FL_H_SEND_CPL", "IDMA_FL_H_SEND_IP_HDR_FIRST", "IDMA_FL_H_SEND_IP_HDR", "IDMA_FL_H_REQ_NEXT_HEADER_FL", "IDMA_FL_H_SEND_NEXT_PCIEHDR", "IDMA_FL_H_SEND_IP_HDR_PADDING", "IDMA_FL_D_SEND_PCIEHDR", "IDMA_FL_D_SEND_CPL_AND_IP_HDR", "IDMA_FL_D_REQ_NEXT_DATA_FL", "IDMA_FL_SEND_PCIEHDR", "IDMA_FL_PUSH_CPL_FIFO", "IDMA_FL_SEND_CPL", "IDMA_FL_SEND_PAYLOAD_FIRST", "IDMA_FL_SEND_PAYLOAD", "IDMA_FL_REQ_NEXT_DATA_FL", "IDMA_FL_SEND_NEXT_PCIEHDR", "IDMA_FL_SEND_PADDING", "IDMA_FL_SEND_COMPLETION_TO_IMSG", "IDMA_FL_SEND_FIFO_TO_IMSG", "IDMA_FL_REQ_DATAFL_DONE", "IDMA_FL_REQ_HEADERFL_DONE", }; static const char * const t5_decode[] = { "IDMA_IDLE", "IDMA_ALMOST_IDLE", "IDMA_PUSH_MORE_CPL_FIFO", "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", "IDMA_PHYSADDR_SEND_PCIEHDR", "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", "IDMA_PHYSADDR_SEND_PAYLOAD", "IDMA_SEND_FIFO_TO_IMSG", "IDMA_FL_REQ_DATA_FL", "IDMA_FL_DROP", "IDMA_FL_DROP_SEND_INC", "IDMA_FL_H_REQ_HEADER_FL", "IDMA_FL_H_SEND_PCIEHDR", "IDMA_FL_H_PUSH_CPL_FIFO", "IDMA_FL_H_SEND_CPL", "IDMA_FL_H_SEND_IP_HDR_FIRST", "IDMA_FL_H_SEND_IP_HDR", "IDMA_FL_H_REQ_NEXT_HEADER_FL", "IDMA_FL_H_SEND_NEXT_PCIEHDR", "IDMA_FL_H_SEND_IP_HDR_PADDING", "IDMA_FL_D_SEND_PCIEHDR", "IDMA_FL_D_SEND_CPL_AND_IP_HDR", "IDMA_FL_D_REQ_NEXT_DATA_FL", "IDMA_FL_SEND_PCIEHDR", 
"IDMA_FL_PUSH_CPL_FIFO", "IDMA_FL_SEND_CPL", "IDMA_FL_SEND_PAYLOAD_FIRST", "IDMA_FL_SEND_PAYLOAD", "IDMA_FL_REQ_NEXT_DATA_FL", "IDMA_FL_SEND_NEXT_PCIEHDR", "IDMA_FL_SEND_PADDING", "IDMA_FL_SEND_COMPLETION_TO_IMSG", }; static const char * const t6_decode[] = { "IDMA_IDLE", "IDMA_PUSH_MORE_CPL_FIFO", "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", "IDMA_PHYSADDR_SEND_PCIEHDR", "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", "IDMA_PHYSADDR_SEND_PAYLOAD", "IDMA_FL_REQ_DATA_FL", "IDMA_FL_DROP", "IDMA_FL_DROP_SEND_INC", "IDMA_FL_H_REQ_HEADER_FL", "IDMA_FL_H_SEND_PCIEHDR", "IDMA_FL_H_PUSH_CPL_FIFO", "IDMA_FL_H_SEND_CPL", "IDMA_FL_H_SEND_IP_HDR_FIRST", "IDMA_FL_H_SEND_IP_HDR", "IDMA_FL_H_REQ_NEXT_HEADER_FL", "IDMA_FL_H_SEND_NEXT_PCIEHDR", "IDMA_FL_H_SEND_IP_HDR_PADDING", "IDMA_FL_D_SEND_PCIEHDR", "IDMA_FL_D_SEND_CPL_AND_IP_HDR", "IDMA_FL_D_REQ_NEXT_DATA_FL", "IDMA_FL_SEND_PCIEHDR", "IDMA_FL_PUSH_CPL_FIFO", "IDMA_FL_SEND_CPL", "IDMA_FL_SEND_PAYLOAD_FIRST", "IDMA_FL_SEND_PAYLOAD", "IDMA_FL_REQ_NEXT_DATA_FL", "IDMA_FL_SEND_NEXT_PCIEHDR", "IDMA_FL_SEND_PADDING", "IDMA_FL_SEND_COMPLETION_TO_IMSG", }; static const u32 sge_regs[] = { A_SGE_DEBUG_DATA_LOW_INDEX_2, A_SGE_DEBUG_DATA_LOW_INDEX_3, A_SGE_DEBUG_DATA_HIGH_INDEX_10, }; const char * const *sge_idma_decode; int sge_idma_decode_nstates; int i; unsigned int chip_version = chip_id(adapter); /* Select the right set of decode strings to dump depending on the * adapter chip type. */ switch (chip_version) { case CHELSIO_T4: sge_idma_decode = (const char * const *)t4_decode; sge_idma_decode_nstates = ARRAY_SIZE(t4_decode); break; case CHELSIO_T5: sge_idma_decode = (const char * const *)t5_decode; sge_idma_decode_nstates = ARRAY_SIZE(t5_decode); break; case CHELSIO_T6: sge_idma_decode = (const char * const *)t6_decode; sge_idma_decode_nstates = ARRAY_SIZE(t6_decode); break; default: CH_ERR(adapter, "Unsupported chip version %d\n", chip_version); return; } if (state < sge_idma_decode_nstates) CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]); else CH_WARN(adapter, "idma state %d unknown\n", state); for (i = 0; i < ARRAY_SIZE(sge_regs); i++) CH_WARN(adapter, "SGE register %#x value %#x\n", sge_regs[i], t4_read_reg(adapter, sge_regs[i])); } /** * t4_sge_ctxt_flush - flush the SGE context cache * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a FW command through the given mailbox to flush the * SGE context cache. */ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox) { int ret; u32 ldst_addrspace; struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC); c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | ldst_addrspace); c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); return ret; } /** * t4_fw_hello - establish communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @evt_mbox: mailbox to receive async FW events * @master: specifies the caller's willingness to be the device master * @state: returns the current device state (if non-NULL) * * Issues a command to establish communication with FW. Returns either * an error (negative integer) or the mailbox of the Master PF. 
*/ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state) { int ret; struct fw_hello_cmd c; u32 v; unsigned int master_mbox; int retries = FW_CMD_HELLO_RETRIES; retry: memset(&c, 0, sizeof(c)); INIT_CMD(c, HELLO, WRITE); c.err_to_clearinit = cpu_to_be32( V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : M_FW_HELLO_CMD_MBMASTER) | V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) | F_FW_HELLO_CMD_CLEARINIT); /* * Issue the HELLO command to the firmware. If it's not successful * but indicates that we got a "busy" or "timeout" condition, retry * the HELLO until we exhaust our retry limit. If we do exceed our * retry limit, check to see if the firmware left us any error * information and report that if so ... */ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret != FW_SUCCESS) { if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) goto retry; if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR) t4_report_fw_error(adap); return ret; } v = be32_to_cpu(c.err_to_clearinit); master_mbox = G_FW_HELLO_CMD_MBMASTER(v); if (state) { if (v & F_FW_HELLO_CMD_ERR) *state = DEV_STATE_ERR; else if (v & F_FW_HELLO_CMD_INIT) *state = DEV_STATE_INIT; else *state = DEV_STATE_UNINIT; } /* * If we're not the Master PF then we need to wait around for the * Master PF Driver to finish setting up the adapter. * * Note that we also do this wait if we're a non-Master-capable PF and * there is no current Master PF; a Master PF may show up momentarily * and we wouldn't want to fail pointlessly. (This can happen when an * OS loads lots of different drivers rapidly at the same time). In * this case, the Master PF returned by the firmware will be * M_PCIE_FW_MASTER so the test below will work ... */ if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 && master_mbox != mbox) { int waiting = FW_CMD_HELLO_TIMEOUT; /* * Wait for the firmware to either indicate an error or * initialized state. If we see either of these we bail out * and report the issue to the caller. If we exhaust the * "hello timeout" and we haven't exhausted our retries, try * again. Otherwise bail with a timeout error. */ for (;;) { u32 pcie_fw; msleep(50); waiting -= 50; /* * If neither Error nor Initialized are indicated * by the firmware, keep waiting till we exhaust our * timeout ... and then retry if we haven't exhausted * our retries ... */ pcie_fw = t4_read_reg(adap, A_PCIE_FW); if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) { if (waiting <= 0) { if (retries-- > 0) goto retry; return -ETIMEDOUT; } continue; } /* * We either have an Error or Initialized condition; * report errors preferentially. */ if (state) { if (pcie_fw & F_PCIE_FW_ERR) *state = DEV_STATE_ERR; else if (pcie_fw & F_PCIE_FW_INIT) *state = DEV_STATE_INIT; } /* * If we arrived before a Master PF was selected and * there's now a valid Master PF, grab its identity * for our caller. */ if (master_mbox == M_PCIE_FW_MASTER && (pcie_fw & F_PCIE_FW_MASTER_VLD)) master_mbox = G_PCIE_FW_MASTER(pcie_fw); break; } } return master_mbox; } /** * t4_fw_bye - end communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a command to terminate communication with FW.
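 *
 * This is normally the last mailbox command issued on detach, e.g.
 * (sketch):
 *
 *	if (sc->flags & FW_OK)
 *		t4_fw_bye(sc, sc->mbox);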
*/ int t4_fw_bye(struct adapter *adap, unsigned int mbox) { struct fw_bye_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, BYE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_fw_reset - issue a reset to FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @reset: specifies the type of reset to perform * * Issues a reset command of the specified type to FW. */ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) { struct fw_reset_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, RESET, WRITE); c.val = cpu_to_be32(reset); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_fw_halt - issue a reset/halt to FW and put uP into RESET * @adap: the adapter * @mbox: mailbox to use for the FW RESET command (if desired) * @force: force uP into RESET even if FW RESET command fails * * Issues a RESET command to firmware (if desired) with a HALT indication * and then puts the microprocessor into RESET state. The RESET command * will only be issued if a legitimate mailbox is provided (mbox <= * M_PCIE_FW_MASTER). * * This is generally used in order for the host to safely manipulate the * adapter without fear of conflicting with whatever the firmware might * be doing. The only way out of this state is to RESTART the firmware * ... */ int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) { int ret = 0; /* * If a legitimate mailbox is provided, issue a RESET command * with a HALT indication. */ if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) { struct fw_reset_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, RESET, WRITE); c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE); c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /* * Normally we won't complete the operation if the firmware RESET * command fails but if our caller insists we'll go ahead and put the * uP into RESET. This can be useful if the firmware is hung or even * missing ... We'll have to take the risk of putting the uP into * RESET without the cooperation of firmware in that case. * * We also force the firmware's HALT flag to be on in case we bypassed * the firmware RESET command above or we're dealing with old firmware * which doesn't have the HALT capability. This will serve as a flag * for the incoming firmware to know that it's coming out of a HALT * rather than a RESET ... if it's new enough to understand that ... */ if (ret == 0 || force) { t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST); t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT); } /* * And we always return the result of the firmware RESET command * even when we force the uP into RESET ... */ return ret; } /** * t4_fw_restart - restart the firmware by taking the uP out of RESET * @adap: the adapter * * Restart firmware previously halted by t4_fw_halt(). On successful * return the previous PF Master remains as the new PF Master and there * is no need to issue a new HELLO command, etc. 
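 *
 * t4_fw_halt() and t4_fw_restart() are meant to bracket host-driven
 * manipulation of the adapter, roughly (sketch):
 *
 *	rc = t4_fw_halt(sc, sc->mbox, force);
 *	... e.g. t4_load_fw(sc, fw_data, size) ...
 *	if (rc == 0)
 *		rc = t4_fw_restart(sc, sc->mbox);
 *
 * which is essentially the sequence t4_fw_upgrade() below implements.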
*/ int t4_fw_restart(struct adapter *adap, unsigned int mbox) { int ms; t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT)) return FW_SUCCESS; msleep(100); ms += 100; } return -ETIMEDOUT; } /** * t4_fw_upgrade - perform all of the steps necessary to upgrade FW * @adap: the adapter * @mbox: mailbox to use for the FW RESET command (if desired) * @fw_data: the firmware image to write * @size: image size * @force: force upgrade even if firmware doesn't cooperate * * Perform all of the steps necessary for upgrading an adapter's * firmware image. Normally this requires the cooperation of the * existing firmware in order to halt all existing activities * but if an invalid mailbox token is passed in we skip that step * (though we'll still put the adapter microprocessor into RESET in * that case). * * On successful return the new firmware will have been loaded and * the adapter will have been fully RESET losing all previous setup * state. On unsuccessful return the adapter may be completely hosed ... * positive errno indicates that the adapter is ~probably~ intact, a * negative errno indicates that things are looking bad ... */ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const u8 *fw_data, unsigned int size, int force) { const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; unsigned int bootstrap = be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP; int ret; if (!t4_fw_matches_chip(adap, fw_hdr)) return -EINVAL; if (!bootstrap) { ret = t4_fw_halt(adap, mbox, force); if (ret < 0 && !force) return ret; } ret = t4_load_fw(adap, fw_data, size); if (ret < 0 || bootstrap) return ret; return t4_fw_restart(adap, mbox); } /** * t4_fw_initialize - ask FW to initialize the device * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a command to FW to partially initialize the device. This * performs initialization that generally doesn't depend on user input. */ int t4_fw_initialize(struct adapter *adap, unsigned int mbox) { struct fw_initialize_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, INITIALIZE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_query_params_rw - query FW or device parameters * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF * @vf: the VF * @nparams: the number of parameters * @params: the parameter names * @val: the parameter values * @rw: Write and read flag * * Reads the value of FW or device parameters. Up to 7 parameters can be * queried at once. 
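 *
 * Example of reading a single device parameter (sketch; the mnemonic
 * macros come from the firmware interface header):
 *
 *	u32 param, val;
 *	int rc;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);
 *	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 *
 * On success val holds the firmware revision.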
*/ int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val, int rw) { int i, ret; struct fw_params_cmd c; __be32 *p = &c.param[0].mnem; if (nparams > 7) return -EINVAL; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) | V_FW_PARAMS_CMD_VFN(vf)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); for (i = 0; i < nparams; i++) { *p++ = cpu_to_be32(*params++); if (rw) *p = cpu_to_be32(*(val + i)); p++; } ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) *val++ = be32_to_cpu(*p); return ret; } int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val) { return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0); } /** * t4_set_params_timeout - sets FW or device parameters * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF * @vf: the VF * @nparams: the number of parameters * @params: the parameter names * @val: the parameter values * @timeout: the timeout time * * Sets the value of FW or device parameters. Up to 7 parameters can be * specified at once. */ int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val, int timeout) { struct fw_params_cmd c; __be32 *p = &c.param[0].mnem; if (nparams > 7) return -EINVAL; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) | V_FW_PARAMS_CMD_VFN(vf)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); while (nparams--) { *p++ = cpu_to_be32(*params++); *p++ = cpu_to_be32(*val++); } return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); } /** * t4_set_params - sets FW or device parameters * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF * @vf: the VF * @nparams: the number of parameters * @params: the parameter names * @val: the parameter values * * Sets the value of FW or device parameters. Up to 7 parameters can be * specified at once. */ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val) { return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, FW_CMD_MAX_TIMEOUT); } /** * t4_cfg_pfvf - configure PF/VF resource limits * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF being configured * @vf: the VF being configured * @txq: the max number of egress queues * @txq_eth_ctrl: the max number of egress Ethernet or control queues * @rxqi: the max number of interrupt-capable ingress queues * @rxq: the max number of interruptless ingress queues * @tc: the PCI traffic class * @vi: the max number of virtual interfaces * @cmask: the channel access rights mask for the PF/VF * @pmask: the port access rights mask for the PF/VF * @nexact: the maximum number of exact MPS filters * @rcaps: read capabilities * @wxcaps: write/execute capabilities * * Configures resource limits and capabilities for a physical or virtual * function. 
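 *
 * All limits travel in one command; a hypothetical VF configuration
 * (every value below is illustrative only):
 *
 *	rc = t4_cfg_pfvf(sc, sc->mbox, pf, vf,
 *	    16, 8,	... txq, txq_eth_ctrl
 *	    8, 8,	... rxqi, rxq
 *	    0, 1,	... tc, vi
 *	    0xf, 0x1,	... cmask, pmask
 *	    4, rcaps, wxcaps);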
*/ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi, unsigned int cmask, unsigned int pmask, unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) { struct fw_pfvf_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | V_FW_PFVF_CMD_VFN(vf)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) | V_FW_PFVF_CMD_NIQ(rxq)); c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) | V_FW_PFVF_CMD_PMASK(pmask) | V_FW_PFVF_CMD_NEQ(txq)); c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) | V_FW_PFVF_CMD_NEXACTF(nexact)); c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) | V_FW_PFVF_CMD_WX_CAPS(wxcaps) | V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_alloc_vi_func - allocate a virtual interface * @adap: the adapter * @mbox: mailbox to use for the FW command * @port: physical port associated with the VI * @pf: the PF owning the VI * @vf: the VF owning the VI * @nmac: number of MAC addresses needed (1 to 5) * @mac: the MAC addresses of the VI * @rss_size: size of RSS table slice associated with this VI * @portfunc: which Port Application Function MAC Address is desired * @idstype: Intrusion Detection Type * * Allocates a virtual interface for the given physical port. If @mac is * not %NULL it contains the MAC addresses of the VI as assigned by FW. * If @rss_size is %NULL the VI is not assigned any RSS slice by FW. * @mac should be large enough to hold @nmac Ethernet addresses, they are * stored consecutively so the space needed is @nmac * 6 bytes. * Returns a negative error number or the non-negative VI id. */ int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, u16 *rss_size, + uint8_t *vfvld, uint16_t *vin, unsigned int portfunc, unsigned int idstype) { int ret; struct fw_vi_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c)); c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) | V_FW_VI_CMD_FUNC(portfunc)); c.portid_pkd = V_FW_VI_CMD_PORTID(port); c.nmac = nmac - 1; if(!rss_size) c.norss_rsssize = F_FW_VI_CMD_NORSS; ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret) return ret; + ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid)); if (mac) { memcpy(mac, c.mac, sizeof(c.mac)); switch (nmac) { case 5: memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); case 4: memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); case 3: memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); case 2: memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); } } if (rss_size) *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize)); - return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid)); + if (vfvld) { + *vfvld = adap->params.viid_smt_extn_support ? + G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) : + G_FW_VIID_VIVLD(ret); + } + if (vin) { + *vin = adap->params.viid_smt_extn_support ? 
+ G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) : + G_FW_VIID_VIN(ret); + } + + return ret; } /** * t4_alloc_vi - allocate an [Ethernet Function] virtual interface * @adap: the adapter * @mbox: mailbox to use for the FW command * @port: physical port associated with the VI * @pf: the PF owning the VI * @vf: the VF owning the VI * @nmac: number of MAC addresses needed (1 to 5) * @mac: the MAC addresses of the VI * @rss_size: size of RSS table slice associated with this VI * * Backwards-compatible convenience routine to allocate a Virtual * Interface with an Ethernet Port Application Function and Intrusion * Detection System disabled. */ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, - u16 *rss_size) + u16 *rss_size, uint8_t *vfvld, uint16_t *vin) { return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size, - FW_VI_FUNC_ETH, 0); + vfvld, vin, FW_VI_FUNC_ETH, 0); } /** * t4_free_vi - free a virtual interface * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the VI * @vf: the VF owning the VI * @viid: virtual interface identifier * * Free a previously allocated virtual interface. */ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int viid) { struct fw_vi_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c)); c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); } /** * t4_set_rxmode - set Rx properties of a virtual interface * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @mtu: the new MTU or -1 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change * @sleep_ok: if true we may sleep while awaiting command completion * * Sets Rx properties of a virtual interface.
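 *
 * Passing -1 leaves a property unchanged, so enabling promiscuous mode
 * without touching anything else is simply (sketch):
 *
 *	rc = t4_set_rxmode(sc, sc->mbox, vi->viid, -1, 1, -1, -1, -1,
 *	    true);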
*/ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok) { struct fw_vi_rxmode_cmd c; /* convert to FW values */ if (mtu < 0) mtu = M_FW_VI_RXMODE_CMD_MTU; if (promisc < 0) promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; if (all_multi < 0) all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; if (bcast < 0) bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; if (vlanex < 0) vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) | V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } /** * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @free: if true any existing filters for this VI id are first removed * @naddr: the number of MAC addresses to allocate filters for (up to 7) * @addr: the MAC address(es) * @idx: where to store the index of each allocated filter * @hash: pointer to hash address filter bitmap * @sleep_ok: call is allowed to sleep * * Allocates an exact-match filter for each of the supplied addresses and * sets it to the corresponding address. If @idx is not %NULL it should * have at least @naddr entries, each of which will be set to the index of * the filter allocated for the corresponding MAC address. If a filter * could not be allocated for an address its index is set to 0xffff. * If @hash is not %NULL addresses that fail to allocate an exact filter * are hashed and update the hash filter bitmap pointed at by @hash. * * Returns a negative error number or the number of filters allocated. */ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) { int offset, ret = 0; struct fw_vi_mac_cmd c; unsigned int nfilters = 0; unsigned int max_naddr = adap->chip_params->mps_tcam_size; unsigned int rem = naddr; if (naddr > max_naddr) return -EINVAL; for (offset = 0; offset < naddr ; /**/) { unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ? rem : ARRAY_SIZE(c.u.exact)); size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, u.exact[fw_naddr]), 16); struct fw_vi_mac_exact *p; int i; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_CMD_EXEC(free) | V_FW_VI_MAC_CMD_VIID(viid)); c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) | V_FW_CMD_LEN16(len16)); for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); } /* * It's okay if we run out of space in our MAC address arena. * Some of the addresses we submit may get stored so we need * to run through the reply to see what the results were ... */ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); if (ret && ret != -FW_ENOMEM) break; for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { u16 index = G_FW_VI_MAC_CMD_IDX( be16_to_cpu(p->valid_to_idx)); if (idx) idx[offset+i] = (index >= max_naddr ? 
0xffff : index); if (index < max_naddr) nfilters++; else if (hash) *hash |= (1ULL << hash_mac_addr(addr[offset+i])); } free = false; offset += fw_naddr; rem -= fw_naddr; } if (ret == 0 || ret == -FW_ENOMEM) ret = nfilters; return ret; } /** * t4_change_mac - modifies the exact-match filter for a MAC address * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @idx: index of existing filter for old value of MAC address, or -1 * @addr: the new MAC address value * @persist: whether a new MAC allocation should be persistent - * @add_smt: if true also add the address to the HW SMT + * @smt_idx: add MAC to SMT and return its index, or NULL * * Modifies an exact-match filter and sets it to the new MAC address if * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the * latter case the address is added persistently if @persist is %true. * * Note that in general it is not possible to modify the value of a given * filter so the generic way to modify an address filter is to free the one * being used by the old address value and allocate a new filter for the * new address value. * * Returns a negative error number or the index of the filter with the new * MAC value. Note that this index may differ from @idx. */ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, - int idx, const u8 *addr, bool persist, bool add_smt) + int idx, const u8 *addr, bool persist, uint16_t *smt_idx) { int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; unsigned int max_mac_addr = adap->chip_params->mps_tcam_size; if (idx < 0) /* new allocation */ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; - mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid)); c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1)); p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | V_FW_VI_MAC_CMD_IDX(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); if (ret >= max_mac_addr) ret = -ENOMEM; + if (smt_idx) { + if (adap->params.viid_smt_extn_support) + *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid)); + else { + if (chip_id(adap) <= CHELSIO_T5) + *smt_idx = (viid & M_FW_VIID_VIN) << 1; + else + *smt_idx = viid & M_FW_VIID_VIN; + } + } } return ret; } /** * t4_set_addr_hash - program the MAC inexact-match hash filter * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @ucast: whether the hash filter should also match unicast addresses * @vec: the value to be written to the hash filter * @sleep_ok: call is allowed to sleep * * Sets the 64-bit inexact-match hash filter for a virtual interface. 
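 *
 * The vector is typically built with hash_mac_addr(); e.g. to hash two
 * addresses that did not fit in the exact-match TCAM (sketch):
 *
 *	u64 vec = 0;
 *
 *	vec |= 1ULL << hash_mac_addr(mac1);
 *	vec |= 1ULL << hash_mac_addr(mac2);
 *	rc = t4_set_addr_hash(sc, sc->mbox, vi->viid, false, vec, true);
 *
 * This mirrors what t4_alloc_mac_filt() above reports through @hash.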
*/ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, bool ucast, u64 vec, bool sleep_ok) { struct fw_vi_mac_cmd c; u32 val; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid)); val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) | V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1); c.freemacs_to_len16 = cpu_to_be32(val); c.u.hash.hashvec = cpu_to_be64(vec); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } /** * t4_enable_vi_params - enable/disable a virtual interface * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @rx_en: 1=enable Rx, 0=disable Rx * @tx_en: 1=enable Tx, 0=disable Tx * @dcb_en: 1=enable delivery of Data Center Bridging messages. * * Enables/disables a virtual interface. Note that setting DCB Enable * only makes sense when enabling a Virtual Interface ... */ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) { struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | V_FW_VI_ENABLE_CMD_EEN(tx_en) | V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | FW_LEN16(c)); return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); } /** * t4_enable_vi - enable/disable a virtual interface * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @rx_en: 1=enable Rx, 0=disable Rx * @tx_en: 1=enable Tx, 0=disable Tx * * Enables/disables a virtual interface. Note that setting DCB Enable * only makes sense when enabling a Virtual Interface ... */ int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en) { return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); } /** * t4_identify_port - identify a VI's port by blinking its LED * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @nblinks: how many times to blink LED at 2.5 Hz * * Identifies a VI's port by blinking its LED. */ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int nblinks) { struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); c.blinkdur = cpu_to_be16(nblinks); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_iq_stop - stop an ingress queue and its FLs * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queues * @vf: the VF owning the queues * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) * @iqid: ingress queue id * @fl0id: FL0 queue id or 0xffff if no attached FL0 * @fl1id: FL1 queue id or 0xffff if no attached FL1 * * Stops an ingress queue and its associated FLs, if any. This causes * any current or future data/messages destined for these queues to be * tossed. 
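 *
 * A queue is normally stopped before it is freed, e.g. (sketch, with
 * 0xffff for an FL that was never attached):
 *
 *	t4_iq_stop(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *	    iqid, fl0id, 0xffff);
 *	t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *	    iqid, fl0id, 0xffff);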
*/ int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id) { struct fw_iq_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | V_FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c)); c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); c.iqid = cpu_to_be16(iqid); c.fl0id = cpu_to_be16(fl0id); c.fl1id = cpu_to_be16(fl1id); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_iq_free - free an ingress queue and its FLs * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queues * @vf: the VF owning the queues * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) * @iqid: ingress queue id * @fl0id: FL0 queue id or 0xffff if no attached FL0 * @fl1id: FL1 queue id or 0xffff if no attached FL1 * * Frees an ingress queue and its associated FLs, if any. */ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id) { struct fw_iq_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | V_FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); c.iqid = cpu_to_be16(iqid); c.fl0id = cpu_to_be16(fl0id); c.fl1id = cpu_to_be16(fl1id); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_eth_eq_free - free an Ethernet egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees an Ethernet egress queue. */ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_eth_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) | V_FW_EQ_ETH_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_ctrl_eq_free - free a control egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees a control egress queue. */ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_ctrl_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) | V_FW_EQ_CTRL_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_ofld_eq_free - free an offload egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees an offload egress queue.
*/ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_ofld_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) | V_FW_EQ_OFLD_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_link_down_rc_str - return a string for a Link Down Reason Code * @link_down_rc: Link Down Reason Code * * Returns a string representation of the Link Down Reason Code. */ const char *t4_link_down_rc_str(unsigned char link_down_rc) { static const char *reason[] = { "Link Down", "Remote Fault", "Auto-negotiation Failure", "Reserved3", "Insufficient Airflow", "Unable To Determine Reason", "No RX Signal Detected", "Reserved7", }; if (link_down_rc >= ARRAY_SIZE(reason)) return "Bad Reason Code"; return reason[link_down_rc]; } /* * Return the highest speed set in the port capabilities, in Mb/s. */ unsigned int fwcap_to_speed(uint32_t caps) { #define TEST_SPEED_RETURN(__caps_speed, __speed) \ do { \ if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ return __speed; \ } while (0) TEST_SPEED_RETURN(400G, 400000); TEST_SPEED_RETURN(200G, 200000); TEST_SPEED_RETURN(100G, 100000); TEST_SPEED_RETURN(50G, 50000); TEST_SPEED_RETURN(40G, 40000); TEST_SPEED_RETURN(25G, 25000); TEST_SPEED_RETURN(10G, 10000); TEST_SPEED_RETURN(1G, 1000); TEST_SPEED_RETURN(100M, 100); #undef TEST_SPEED_RETURN return 0; } /* * Return the port capabilities bit for the given speed, which is in Mb/s. */ uint32_t speed_to_fwcap(unsigned int speed) { #define TEST_SPEED_RETURN(__caps_speed, __speed) \ do { \ if (speed == __speed) \ return FW_PORT_CAP32_SPEED_##__caps_speed; \ } while (0) TEST_SPEED_RETURN(400G, 400000); TEST_SPEED_RETURN(200G, 200000); TEST_SPEED_RETURN(100G, 100000); TEST_SPEED_RETURN(50G, 50000); TEST_SPEED_RETURN(40G, 40000); TEST_SPEED_RETURN(25G, 25000); TEST_SPEED_RETURN(10G, 10000); TEST_SPEED_RETURN(1G, 1000); TEST_SPEED_RETURN(100M, 100); #undef TEST_SPEED_RETURN return 0; } /* * Return the port capabilities bit for the highest speed in the capabilities. */ uint32_t fwcap_top_speed(uint32_t caps) { #define TEST_SPEED_RETURN(__caps_speed) \ do { \ if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ return FW_PORT_CAP32_SPEED_##__caps_speed; \ } while (0) TEST_SPEED_RETURN(400G); TEST_SPEED_RETURN(200G); TEST_SPEED_RETURN(100G); TEST_SPEED_RETURN(50G); TEST_SPEED_RETURN(40G); TEST_SPEED_RETURN(25G); TEST_SPEED_RETURN(10G); TEST_SPEED_RETURN(1G); TEST_SPEED_RETURN(100M); #undef TEST_SPEED_RETURN return 0; } /** * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value * * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new * 32-bit Port Capabilities value. */ static uint32_t lstatus_to_fwcap(u32 lstatus) { uint32_t linkattr = 0; /* * Unfortunately the format of the Link Status in the old * 16-bit Port Information message isn't the same as the * 16-bit Port Capabilities bitfield used everywhere else ... 
*/ if (lstatus & F_FW_PORT_CMD_RXPAUSE) linkattr |= FW_PORT_CAP32_FC_RX; if (lstatus & F_FW_PORT_CMD_TXPAUSE) linkattr |= FW_PORT_CAP32_FC_TX; if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) linkattr |= FW_PORT_CAP32_SPEED_100M; if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) linkattr |= FW_PORT_CAP32_SPEED_1G; if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) linkattr |= FW_PORT_CAP32_SPEED_10G; if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) linkattr |= FW_PORT_CAP32_SPEED_25G; if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) linkattr |= FW_PORT_CAP32_SPEED_40G; if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) linkattr |= FW_PORT_CAP32_SPEED_100G; return linkattr; } /* * Updates all fields owned by the common code in port_info and link_config * based on information provided by the firmware. Does not touch any * requested_* field. */ static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p, enum fw_port_action action, bool *mod_changed, bool *link_changed) { struct link_config old_lc, *lc = &pi->link_cfg; unsigned char fc, fec; u32 stat, linkattr; int old_ptype, old_mtype; old_ptype = pi->port_type; old_mtype = pi->mod_type; old_lc = *lc; if (action == FW_PORT_ACTION_GET_PORT_INFO) { stat = be32_to_cpu(p->u.info.lstatus_to_modtype); pi->port_type = G_FW_PORT_CMD_PTYPE(stat); pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat); pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ? G_FW_PORT_CMD_MDIOADDR(stat) : -1; lc->supported = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap)); lc->advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap)); lc->lp_advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap)); lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0; lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat); linkattr = lstatus_to_fwcap(stat); } else if (action == FW_PORT_ACTION_GET_PORT_INFO32) { stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32); pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat); pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat); pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ? 
G_FW_PORT_CMD_MDIOADDR32(stat) : -1; lc->supported = be32_to_cpu(p->u.info32.pcaps32); lc->advertising = be32_to_cpu(p->u.info32.acaps32); lc->lp_advertising = be32_to_cpu(p->u.info32.lpacaps32); lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0; lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat); linkattr = be32_to_cpu(p->u.info32.linkattr32); } else { CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action); return; } lc->speed = fwcap_to_speed(linkattr); fc = 0; if (linkattr & FW_PORT_CAP32_FC_RX) fc |= PAUSE_RX; if (linkattr & FW_PORT_CAP32_FC_TX) fc |= PAUSE_TX; lc->fc = fc; fec = FEC_NONE; if (linkattr & FW_PORT_CAP32_FEC_RS) fec |= FEC_RS; if (linkattr & FW_PORT_CAP32_FEC_BASER_RS) fec |= FEC_BASER_RS; lc->fec = fec; if (mod_changed != NULL) *mod_changed = false; if (link_changed != NULL) *link_changed = false; if (old_ptype != pi->port_type || old_mtype != pi->mod_type || old_lc.supported != lc->supported) { if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { lc->fec_hint = lc->advertising & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC); } if (mod_changed != NULL) *mod_changed = true; } if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed || old_lc.fec != lc->fec || old_lc.fc != lc->fc) { if (link_changed != NULL) *link_changed = true; } } /** * t4_update_port_info - retrieve and update port information if changed * @pi: the port_info * * We issue a Get Port Information Command to the Firmware and, if * successful, we check to see if anything is different from what we * last recorded and update things accordingly. */ int t4_update_port_info(struct port_info *pi) { struct adapter *sc = pi->adapter; struct fw_port_cmd cmd; enum fw_port_action action; int ret; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_PORT_CMD_PORTID(pi->tx_chan)); action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 : FW_PORT_ACTION_GET_PORT_INFO; cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) | FW_LEN16(cmd)); ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd); if (ret) return ret; handle_port_info(pi, &cmd, action, NULL, NULL); return 0; } /** * t4_handle_fw_rpl - process a FW reply message * @adap: the adapter * @rpl: start of the FW message * * Processes a FW message, such as link state change messages. */ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) { u8 opcode = *(const u8 *)rpl; const struct fw_port_cmd *p = (const void *)rpl; enum fw_port_action action = G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); bool mod_changed, link_changed; if (opcode == FW_PORT_CMD && (action == FW_PORT_ACTION_GET_PORT_INFO || action == FW_PORT_ACTION_GET_PORT_INFO32)) { /* link/module state change message */ int i; int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); struct port_info *pi = NULL; struct link_config *lc; for_each_port(adap, i) { pi = adap2pinfo(adap, i); if (pi->tx_chan == chan) break; } lc = &pi->link_cfg; PORT_LOCK(pi); handle_port_info(pi, p, action, &mod_changed, &link_changed); PORT_UNLOCK(pi); if (mod_changed) t4_os_portmod_changed(pi); if (link_changed) { PORT_LOCK(pi); t4_os_link_changed(pi); PORT_UNLOCK(pi); } } else { CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode); return -EINVAL; } return 0; } /** * get_pci_mode - determine a card's PCI mode * @adapter: the adapter * @p: where to store the PCI settings * * Determines a card's PCI mode and associated parameters, such as speed * and width.
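 *
 * The speed stored below is the Link Status "Current Link Speed" code
 * (1 = 2.5GT/s, 2 = 5GT/s, 3 = 8GT/s) and the width is the negotiated
 * lane count, so a Gen3 x8 link reads back as speed 3, width 8.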
*/ static void get_pci_mode(struct adapter *adapter, struct pci_params *p) { u16 val; u32 pcie_cap; pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); if (pcie_cap) { t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val); p->speed = val & PCI_EXP_LNKSTA_CLS; p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; } } struct flash_desc { u32 vendor_and_model_id; u32 size_mb; }; int t4_get_flash_params(struct adapter *adapter) { /* * Table for non-standard supported Flash parts. Note, all Flash * parts must have 64KB sectors. */ static struct flash_desc supported_flash[] = { { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ }; int ret; u32 flashid = 0; unsigned int part, manufacturer; unsigned int density, size = 0; /* * Issue a Read ID Command to the Flash part. We decode supported * Flash parts and their sizes from this. There's a newer Query * Command which can retrieve detailed geometry information but many * Flash parts don't support it. */ ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); if (!ret) ret = sf1_read(adapter, 3, 0, 1, &flashid); t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ if (ret < 0) return ret; /* * Check to see if it's one of our non-standard supported Flash parts. */ for (part = 0; part < ARRAY_SIZE(supported_flash); part++) if (supported_flash[part].vendor_and_model_id == flashid) { adapter->params.sf_size = supported_flash[part].size_mb; adapter->params.sf_nsec = adapter->params.sf_size / SF_SEC_SIZE; goto found; } /* * Decode Flash part size. The code below looks repetitive with * common encodings, but that's not guaranteed in the JEDEC * specification for the Read JEDEC ID command. The only thing that * we're guaranteed by the JEDEC specification is where the * Manufacturer ID is in the returned result. After that each * Manufacturer ~could~ encode things completely differently. * Note, all Flash parts must have 64KB sectors. */ manufacturer = flashid & 0xff; switch (manufacturer) { case 0x20: /* Micron/Numonix */ /* * This Density -> Size decoding table is taken from Micron * Data Sheets. */ density = (flashid >> 16) & 0xff; switch (density) { case 0x14: size = 1 << 20; break; /* 1MB */ case 0x15: size = 1 << 21; break; /* 2MB */ case 0x16: size = 1 << 22; break; /* 4MB */ case 0x17: size = 1 << 23; break; /* 8MB */ case 0x18: size = 1 << 24; break; /* 16MB */ case 0x19: size = 1 << 25; break; /* 32MB */ case 0x20: size = 1 << 26; break; /* 64MB */ case 0x21: size = 1 << 27; break; /* 128MB */ case 0x22: size = 1 << 28; break; /* 256MB */ } break; case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */ /* * This Density -> Size decoding table is taken from ISSI * Data Sheets. */ density = (flashid >> 16) & 0xff; switch (density) { case 0x16: size = 1 << 25; break; /* 32MB */ case 0x17: size = 1 << 26; break; /* 64MB */ } break; case 0xc2: /* Macronix */ /* * This Density -> Size decoding table is taken from Macronix * Data Sheets. */ density = (flashid >> 16) & 0xff; switch (density) { case 0x17: size = 1 << 23; break; /* 8MB */ case 0x18: size = 1 << 24; break; /* 16MB */ } break; case 0xef: /* Winbond */ /* * This Density -> Size decoding table is taken from Winbond * Data Sheets. */ density = (flashid >> 16) & 0xff; switch (density) { case 0x17: size = 1 << 23; break; /* 8MB */ case 0x18: size = 1 << 24; break; /* 16MB */ } break; } /* If we didn't recognize the FLASH part, that's no real issue: the * Hardware/Software contract says that Hardware will _*ALWAYS*_ * use a FLASH part which is at least 4MB in size and has 64KB * sectors.
The unrecognized FLASH part is likely to be much larger * than 4MB, but that's all we really need. */ if (size == 0) { CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid); size = 1 << 22; } /* * Store decoded Flash size and fall through into vetting code. */ adapter->params.sf_size = size; adapter->params.sf_nsec = size / SF_SEC_SIZE; found: /* * We should ~probably~ reject adapters with FLASHes which are too * small but we have some legacy FPGAs with small FLASHes that we'd * still like to use. So instead we emit a scary message ... */ if (adapter->params.sf_size < FLASH_MIN_SIZE) CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n", flashid, adapter->params.sf_size, FLASH_MIN_SIZE); return 0; } static void set_pcie_completion_timeout(struct adapter *adapter, u8 range) { u16 val; u32 pcie_cap; pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); if (pcie_cap) { t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); val &= 0xfff0; val |= range ; t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); } } const struct chip_params *t4_get_chip_params(int chipid) { static const struct chip_params chip_params[] = { { /* T4 */ .nchan = NCHAN, .pm_stats_cnt = PM_NSTATS, .cng_ch_bits_log = 2, .nsched_cls = 15, .cim_num_obq = CIM_NUM_OBQ, .mps_rplc_size = 128, .vfcount = 128, .sge_fl_db = F_DBPRIO, .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES, }, { /* T5 */ .nchan = NCHAN, .pm_stats_cnt = PM_NSTATS, .cng_ch_bits_log = 2, .nsched_cls = 16, .cim_num_obq = CIM_NUM_OBQ_T5, .mps_rplc_size = 128, .vfcount = 128, .sge_fl_db = F_DBPRIO | F_DBTYPE, .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES, }, { /* T6 */ .nchan = T6_NCHAN, .pm_stats_cnt = T6_PM_NSTATS, .cng_ch_bits_log = 3, .nsched_cls = 16, .cim_num_obq = CIM_NUM_OBQ_T5, .mps_rplc_size = 256, .vfcount = 256, .sge_fl_db = 0, .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES, }, }; chipid -= CHELSIO_T4; if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params)) return NULL; return &chip_params[chipid]; } /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter * @buf: temporary space of at least VPD_LEN size provided by the caller. * * Initialize adapter SW state for the various HW modules, set initial * values for some adapter tunables, take PHYs out of reset, and * initialize the MDIO interface. 
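 *
 * A simplified sketch of attach-time use (the error handling shown is
 * illustrative, not the driver's exact flow):
 *
 *	u32 buf[VPD_LEN / 4];	/* scratch space for VPD parsing */
 *	if (t4_prep_adapter(sc, buf) != 0)
 *		return (ENXIO);	/* chip rev/flash/VPD not usable */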
*/ int t4_prep_adapter(struct adapter *adapter, u32 *buf) { int ret; uint16_t device_id; uint32_t pl_rev; get_pci_mode(adapter, &adapter->params.pci); pl_rev = t4_read_reg(adapter, A_PL_REV); adapter->params.chipid = G_CHIPID(pl_rev); adapter->params.rev = G_REV(pl_rev); if (adapter->params.chipid == 0) { /* T4 did not have chipid in PL_REV (T5 onwards do) */ adapter->params.chipid = CHELSIO_T4; /* T4A1 chip is not supported */ if (adapter->params.rev == 1) { CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n"); return -EINVAL; } } adapter->chip_params = t4_get_chip_params(chip_id(adapter)); if (adapter->chip_params == NULL) return -EINVAL; adapter->params.pci.vpd_cap_addr = t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD); ret = t4_get_flash_params(adapter); if (ret < 0) return ret; /* Cards with real ASICs have the chipid in the PCIe device id */ t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id); if (device_id >> 12 == chip_id(adapter)) adapter->params.cim_la_size = CIMLA_SIZE; else { /* FPGA */ adapter->params.fpga = 1; adapter->params.cim_la_size = 2 * CIMLA_SIZE; } ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf); if (ret < 0) return ret; init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); /* * Default port and clock for debugging in case we can't reach FW. */ adapter->params.nports = 1; adapter->params.portvec = 1; adapter->params.vpd.cclk = 50000; /* Set pci completion timeout value to 4 seconds. */ set_pcie_completion_timeout(adapter, 0xd); return 0; } /** * t4_shutdown_adapter - shut down adapter, host & wire * @adapter: the adapter * * Perform an emergency shutdown of the adapter and stop it from * continuing any further communication on the ports or DMA to the * host. This is typically used when the adapter and/or firmware * have crashed and we want to prevent any further accidental * communication with the rest of the world. This will also force * the port Link Status to go down -- if register writes work -- * which should help our peers figure out that we're down. */ int t4_shutdown_adapter(struct adapter *adapter) { int port; t4_intr_disable(adapter); t4_write_reg(adapter, A_DBG_GPIO_EN, 0); for_each_port(adapter, port) { u32 a_port_cfg = is_t4(adapter) ? PORT_REG(port, A_XGMAC_PORT_CFG) : T5_PORT_REG(port, A_MAC_PORT_CFG); t4_write_reg(adapter, a_port_cfg, t4_read_reg(adapter, a_port_cfg) & ~V_SIGNAL_DET(1)); } t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0); return 0; } /** * t4_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID * @qtype: the Ingress or Egress type for @qid * @user: true if this request is for a user mode queue * @pbar2_qoffset: BAR2 Queue Offset * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues * * Returns the BAR2 SGE Queue Registers information associated with the * indicated Absolute Queue ID. These are passed back in return value * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. * * This may return an error which indicates that BAR2 SGE Queue * registers aren't available. If an error is not returned, then the * following values are returned: * * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid * * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which * require the "Inferred Queue ID" ability may be used. E.g. the * Write Combining Doorbell Buffer. 
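 *
 * (A worked example under assumed parameters: with page_shift = 12 and
 * a Queues-Per-Page shift of 3, Absolute Queue ID 10 lands in BAR2 page
 * 10 >> 3 = 1, i.e. byte offset 4096; its in-page ID is 10 & 7 = 2,
 * giving a Queue ID Offset of 2 * SGE_UDB_SIZE = 256, which is less
 * than the 4096-byte page, so the returned offset is 4096 + 256 = 4352
 * with a BAR2 Queue ID of 0.)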
If the BAR2 Queue ID is not 0, * then these "Inferred Queue ID" registers may not be used. */ int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, enum t4_bar2_qtype qtype, int user, u64 *pbar2_qoffset, unsigned int *pbar2_qid) { unsigned int page_shift, page_size, qpp_shift, qpp_mask; u64 bar2_page_offset, bar2_qoffset; unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; /* T4 doesn't support BAR2 SGE Queue registers for kernel * mode queues. */ if (!user && is_t4(adapter)) return -EINVAL; /* Get our SGE Page Size parameters. */ page_shift = adapter->params.sge.page_shift; page_size = 1 << page_shift; /* Get the right Queues per Page parameters for our Queue. */ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ? adapter->params.sge.eq_s_qpp : adapter->params.sge.iq_s_qpp); qpp_mask = (1 << qpp_shift) - 1; /* Calculate the basics of the BAR2 SGE Queue register area: * o The BAR2 page the Queue registers will be in. * o The BAR2 Queue ID. * o The BAR2 Queue ID Offset into the BAR2 page. */ bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); bar2_qid = qid & qpp_mask; bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; /* If the BAR2 Queue ID Offset is less than the Page Size, then the * hardware will infer the Absolute Queue ID simply from the writes to * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply * write to the first BAR2 SGE Queue Area within the BAR2 Page with * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID * from the BAR2 Page and BAR2 Queue ID. * * One important consequence of this is that some BAR2 SGE registers * have a "Queue ID" field and we can write the BAR2 SGE Queue ID * there. But other registers synthesize the SGE Queue ID purely * from the writes to the registers -- the Write Combined Doorbell * Buffer is a good example. These BAR2 SGE Registers are only * available for those BAR2 SGE Register areas where the SGE Absolute * Queue ID can be inferred from simple writes. */ bar2_qoffset = bar2_page_offset; bar2_qinferred = (bar2_qid_offset < page_size); if (bar2_qinferred) { bar2_qoffset += bar2_qid_offset; bar2_qid = 0; } *pbar2_qoffset = bar2_qoffset; *pbar2_qid = bar2_qid; return 0; } /** * t4_init_devlog_params - initialize adapter->params.devlog * @adap: the adapter * @fw_attach: whether we can talk to the firmware * * Initialize various fields of the adapter's Firmware Device Log * Parameters structure. */ int t4_init_devlog_params(struct adapter *adap, int fw_attach) { struct devlog_params *dparams = &adap->params.devlog; u32 pf_dparams; unsigned int devlog_meminfo; struct fw_devlog_cmd devlog_cmd; int ret; /* If we're dealing with newer firmware, the Device Log Parameters * are stored in a designated register which allows us to access the * Device Log even if we can't talk to the firmware. */ pf_dparams = t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG)); if (pf_dparams) { unsigned int nentries, nentries128; dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams); dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4; nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams); nentries = (nentries128 + 1) * 128; dparams->size = nentries * sizeof(struct fw_devlog_e); return 0; } /* * For any failing returns ... */ memset(dparams, 0, sizeof *dparams); /* * If we can't talk to the firmware, there's really nothing we can do * at this point.
*/ if (!fw_attach) return -ENXIO; /* Otherwise, ask the firmware for its Device Log Parameters. */ memset(&devlog_cmd, 0, sizeof devlog_cmd); devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ); devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), &devlog_cmd); if (ret) return ret; devlog_meminfo = be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog); dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo); dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4; dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog); return 0; } /** * t4_init_sge_params - initialize adap->params.sge * @adapter: the adapter * * Initialize various fields of the adapter's SGE Parameters structure. */ int t4_init_sge_params(struct adapter *adapter) { u32 r; struct sge_params *sp = &adapter->params.sge; unsigned i, tscale = 1; r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD); sp->counter_val[0] = G_THRESHOLD_0(r); sp->counter_val[1] = G_THRESHOLD_1(r); sp->counter_val[2] = G_THRESHOLD_2(r); sp->counter_val[3] = G_THRESHOLD_3(r); if (chip_id(adapter) >= CHELSIO_T6) { r = t4_read_reg(adapter, A_SGE_ITP_CONTROL); tscale = G_TSCALE(r); if (tscale == 0) tscale = 1; else tscale += 2; } r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1); sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale; sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale; r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3); sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale; sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale; r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5); sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale; sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale; r = t4_read_reg(adapter, A_SGE_CONM_CTRL); sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1; if (is_t4(adapter)) sp->fl_starve_threshold2 = sp->fl_starve_threshold; else if (is_t5(adapter)) sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1; else sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1; /* egress queues: log2 of # of doorbells per BAR2 page */ r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); r >>= S_QUEUESPERPAGEPF0 + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf; sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0; /* ingress queues: log2 of # of doorbells per BAR2 page */ r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF); r >>= S_QUEUESPERPAGEPF0 + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf; sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0; r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE); r >>= S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf; sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10; r = t4_read_reg(adapter, A_SGE_CONTROL); sp->sge_control = r; sp->spg_len = r & F_EGRSTATUSPAGESIZE ?
128 : 64; sp->fl_pktshift = G_PKTSHIFT(r); if (chip_id(adapter) <= CHELSIO_T5) { sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + X_INGPADBOUNDARY_SHIFT); } else { sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + X_T6_INGPADBOUNDARY_SHIFT); } if (is_t4(adapter)) sp->pack_boundary = sp->pad_boundary; else { r = t4_read_reg(adapter, A_SGE_CONTROL2); if (G_INGPACKBOUNDARY(r) == 0) sp->pack_boundary = 16; else sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5); } for (i = 0; i < SGE_FLBUF_SIZES; i++) sp->sge_fl_buffer_size[i] = t4_read_reg(adapter, A_SGE_FL_BUFFER_SIZE0 + (4 * i)); return 0; } /* * Read and cache the adapter's compressed filter mode and ingress config. */ static void read_filter_mode_and_ingress_config(struct adapter *adap, bool sleep_ok) { uint32_t v; struct tp_params *tpp = &adap->params.tp; t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok); t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG, sleep_ok); /* * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field * shift positions of several elements of the Compressed Filter Tuple * for this adapter which we need frequently ... */ tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE); tpp->port_shift = t4_filter_field_shift(adap, F_PORT); tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN); tpp->tos_shift = t4_filter_field_shift(adap, F_TOS); tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL); tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE); tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH); tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE); tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION); if (chip_id(adap) > CHELSIO_T4) { v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3)); adap->params.tp.hash_filter_mask = v; v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4)); adap->params.tp.hash_filter_mask |= (u64)v << 32; } } /** * t4_init_tp_params - initialize adap->params.tp * @adap: the adapter * * Initialize various fields of the adapter's TP Parameters structure. */ int t4_init_tp_params(struct adapter *adap, bool sleep_ok) { int chan; u32 v; struct tp_params *tpp = &adap->params.tp; v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION); tpp->tre = G_TIMERRESOLUTION(v); tpp->dack_re = G_DELAYEDACKRESOLUTION(v); /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ for (chan = 0; chan < MAX_NCHAN; chan++) tpp->tx_modq[chan] = chan; read_filter_mode_and_ingress_config(adap, sleep_ok); /* * Cache a mask of the bits that represent the error vector portion of * rx_pkt.err_vec. T6+ can use a compressed error vector to make room * for information about outer encapsulation (GENEVE/VXLAN/NVGRE). */ tpp->err_vec_mask = htobe16(0xffff); if (chip_id(adap) > CHELSIO_T5) { v = t4_read_reg(adap, A_TP_OUT_CONFIG); if (v & F_CRXPKTENC) { tpp->err_vec_mask = htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC)); } } return 0; } /** * t4_filter_field_shift - calculate filter field shift * @adap: the adapter * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) * * Return the shift position of a filter field within the Compressed * Filter Tuple. The filter field is specified via its selection bit * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
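 *
 * A worked example: if the filter mode enables only F_PORT (3 bits
 * wide) and F_VLAN, then t4_filter_field_shift(adap, F_VLAN) walks
 * past the PORT field and returns W_FT_PORT = 3, while
 * t4_filter_field_shift(adap, F_PORT) returns 0.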
*/ int t4_filter_field_shift(const struct adapter *adap, int filter_sel) { unsigned int filter_mode = adap->params.tp.vlan_pri_map; unsigned int sel; int field_shift; if ((filter_mode & filter_sel) == 0) return -1; for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { switch (filter_mode & sel) { case F_FCOE: field_shift += W_FT_FCOE; break; case F_PORT: field_shift += W_FT_PORT; break; case F_VNIC_ID: field_shift += W_FT_VNIC_ID; break; case F_VLAN: field_shift += W_FT_VLAN; break; case F_TOS: field_shift += W_FT_TOS; break; case F_PROTOCOL: field_shift += W_FT_PROTOCOL; break; case F_ETHERTYPE: field_shift += W_FT_ETHERTYPE; break; case F_MACMATCH: field_shift += W_FT_MACMATCH; break; case F_MPSHITTYPE: field_shift += W_FT_MPSHITTYPE; break; case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break; } } return field_shift; } int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id) { u8 addr[6]; int ret, i, j; - u16 rss_size; struct port_info *p = adap2pinfo(adap, port_id); u32 param, val; + struct vi_info *vi = &p->vi[0]; for (i = 0, j = -1; i <= p->port_id; i++) { do { j++; } while ((adap->params.portvec & (1 << j)) == 0); } p->tx_chan = j; p->mps_bg_map = t4_get_mps_bg_map(adap, j); p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j); p->lport = j; if (!(adap->flags & IS_VF) || adap->params.vfres.r_caps & FW_CMD_CAP_PORT) { t4_update_port_info(p); } - ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size, + &vi->vfvld, &vi->vin); if (ret < 0) return ret; - p->vi[0].viid = ret; - if (chip_id(adap) <= CHELSIO_T5) - p->vi[0].smt_idx = (ret & 0x7f) << 1; - else - p->vi[0].smt_idx = (ret & 0x7f); - p->vi[0].rss_size = rss_size; + vi->viid = ret; t4_os_set_hw_addr(p, addr); param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | - V_FW_PARAMS_PARAM_YZ(p->vi[0].viid); + V_FW_PARAMS_PARAM_YZ(vi->viid); ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val); if (ret) - p->vi[0].rss_base = 0xffff; + vi->rss_base = 0xffff; else { /* MPASS((val >> 16) == rss_size); */ - p->vi[0].rss_base = val & 0xffff; + vi->rss_base = val & 0xffff; } return 0; } /** * t4_read_cimq_cfg - read CIM queue configuration * @adap: the adapter * @base: holds the queue base addresses in bytes * @size: holds the queue sizes in bytes * @thres: holds the queue full thresholds in bytes * * Returns the current configuration of the CIM queues, starting with * the IBQs, then the OBQs. */ void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres) { unsigned int i, v; int cim_num_obq = adap->chip_params->cim_num_obq; for (i = 0; i < CIM_NUM_IBQ; i++) { t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT | V_QUENUMSELECT(i)); v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); /* value is in 256-byte units */ *base++ = G_CIMQBASE(v) * 256; *size++ = G_CIMQSIZE(v) * 256; *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */ } for (i = 0; i < cim_num_obq; i++) { t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT | V_QUENUMSELECT(i)); v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); /* value is in 256-byte units */ *base++ = G_CIMQBASE(v) * 256; *size++ = G_CIMQSIZE(v) * 256; } } /** * t4_read_cim_ibq - read the contents of a CIM inbound queue * @adap: the adapter * @qid: the queue index * @data: where to store the queue contents * @n: capacity of @data in 32-bit words * * Reads the contents of the selected CIM queue starting at address 0 up * to the capacity of @data. 
@n must be a multiple of 4. Returns < 0 on * error and the number of 32-bit words actually read on success. */ int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) { int i, err, attempts; unsigned int addr; const unsigned int nwords = CIM_IBQ_SIZE * 4; if (qid > 5 || (n & 3)) return -EINVAL; addr = qid * nwords; if (n > nwords) n = nwords; /* It might take 3-10ms before the IBQ debug read access is allowed. * Wait for 1 Sec with a delay of 1 usec. */ attempts = 1000000; for (i = 0; i < n; i++, addr++) { t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) | F_IBQDBGEN); err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0, attempts, 1); if (err) return err; *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA); } t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0); return i; } /** * t4_read_cim_obq - read the contents of a CIM outbound queue * @adap: the adapter * @qid: the queue index * @data: where to store the queue contents * @n: capacity of @data in 32-bit words * * Reads the contents of the selected CIM queue starting at address 0 up * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on * error and the number of 32-bit words actually read on success. */ int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) { int i, err; unsigned int addr, v, nwords; int cim_num_obq = adap->chip_params->cim_num_obq; if ((qid > (cim_num_obq - 1)) || (n & 3)) return -EINVAL; t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT | V_QUENUMSELECT(qid)); v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */ nwords = G_CIMQSIZE(v) * 64; /* same */ if (n > nwords) n = nwords; for (i = 0; i < n; i++, addr++) { t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) | F_OBQDBGEN); err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1); if (err) return err; *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA); } t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0); return i; } enum { CIM_QCTL_BASE = 0, CIM_CTL_BASE = 0x2000, CIM_PBT_ADDR_BASE = 0x2800, CIM_PBT_LRF_BASE = 0x3000, CIM_PBT_DATA_BASE = 0x3800 }; /** * t4_cim_read - read a block from CIM internal address space * @adap: the adapter * @addr: the start address within the CIM address space * @n: number of words to read * @valp: where to store the result * * Reads a block of 4-byte words from the CIM internal address space. */ int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp) { int ret = 0; if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) return -EBUSY; for ( ; !ret && n--; addr += 4) { t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr); ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 0, 5, 2); if (!ret) *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA); } return ret; } /** * t4_cim_write - write a block into CIM internal address space * @adap: the adapter * @addr: the start address within the CIM address space * @n: number of words to write * @valp: set of values to write * * Writes a block of 4-byte words into the CIM internal address space.
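 *
 * A minimal read-modify-write sketch (the register choice is
 * illustrative only):
 *
 *	unsigned int v;
 *	if (t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &v) == 0)
 *		(void)t4_cim_write(adap, A_UP_UP_DBG_LA_CFG, 1, &v);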
*/ int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, const unsigned int *valp) { int ret = 0; if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) return -EBUSY; for ( ; !ret && n--; addr += 4) { t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++); t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE); ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 0, 5, 2); } return ret; } static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val) { return t4_cim_write(adap, addr, 1, &val); } /** * t4_cim_ctl_read - read a block from CIM control region * @adap: the adapter * @addr: the start address within the CIM control region * @n: number of words to read * @valp: where to store the result * * Reads a block of 4-byte words from the CIM control region. */ int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp) { return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp); } /** * t4_cim_read_la - read CIM LA capture buffer * @adap: the adapter * @la_buf: where to store the LA data * @wrptr: the HW write pointer within the capture buffer * * Reads the contents of the CIM LA buffer with the most recent entry at * the end of the returned data and with the entry at @wrptr first. * We try to leave the LA in the running state we find it in. */ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) { int i, ret; unsigned int cfg, val, idx; ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg); if (ret) return ret; if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */ ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0); if (ret) return ret; } ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val); if (ret) goto restart; idx = G_UPDBGLAWRPTR(val); if (wrptr) *wrptr = idx; for (i = 0; i < adap->params.cim_la_size; i++) { ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN); if (ret) break; ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val); if (ret) break; if (val & F_UPDBGLARDEN) { ret = -ETIMEDOUT; break; } ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]); if (ret) break; /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */ idx = (idx + 1) & M_UPDBGLARDPTR; /* * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to * identify the 32-bit portion of the full 312-bit data */ if (is_t6(adap)) while ((idx & 0xf) > 9) idx = (idx + 1) % M_UPDBGLARDPTR; } restart: if (cfg & F_UPDBGLAEN) { int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, cfg & ~F_UPDBGLARDEN); if (!ret) ret = r; } return ret; } /** * t4_tp_read_la - read TP LA capture buffer * @adap: the adapter * @la_buf: where to store the LA data * @wrptr: the HW write pointer within the capture buffer * * Reads the contents of the TP LA buffer with the most recent entry at * the end of the returned data and with the entry at @wrptr first. * We leave the LA in the running state we find it in. 
*/ void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr) { bool last_incomplete; unsigned int i, cfg, val, idx; cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff; if (cfg & F_DBGLAENABLE) /* freeze LA */ t4_write_reg(adap, A_TP_DBG_LA_CONFIG, adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE)); val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG); idx = G_DBGLAWPTR(val); last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0; if (last_incomplete) idx = (idx + 1) & M_DBGLARPTR; if (wrptr) *wrptr = idx; val &= 0xffff; val &= ~V_DBGLARPTR(M_DBGLARPTR); val |= adap->params.tp.la_mask; for (i = 0; i < TPLA_SIZE; i++) { t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val); la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL); idx = (idx + 1) & M_DBGLARPTR; } /* Wipe out last entry if it isn't valid */ if (last_incomplete) la_buf[TPLA_SIZE - 1] = ~0ULL; if (cfg & F_DBGLAENABLE) /* restore running state */ t4_write_reg(adap, A_TP_DBG_LA_CONFIG, cfg | adap->params.tp.la_mask); } /* * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in * seconds). If we find one of the SGE Ingress DMA State Machines in the same * state for more than the Warning Threshold then we'll issue a warning about * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel * appears to be hung every Warning Repeat second till the situation clears. * If the situation clears, we'll note that as well. */ #define SGE_IDMA_WARN_THRESH 1 #define SGE_IDMA_WARN_REPEAT 300 /** * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor * @adapter: the adapter * @idma: the adapter IDMA Monitor state * * Initialize the state of an SGE Ingress DMA Monitor. */ void t4_idma_monitor_init(struct adapter *adapter, struct sge_idma_monitor_state *idma) { /* Initialize the state variables for detecting an SGE Ingress DMA * hang. The SGE has internal counters which count up on each clock * tick whenever the SGE finds its Ingress DMA State Engines in the * same state they were on the previous clock tick. The clock used is * the Core Clock so we have a limit on the maximum "time" they can * record; typically a very small number of seconds. For instance, * with a 600MHz Core Clock, we can only count up to a bit more than * 7s. So we'll synthesize a larger counter in order to not run the * risk of having the "timers" overflow and give us the flexibility to * maintain a Hung SGE State Machine of our own which operates across * a longer time frame. */ idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */ idma->idma_stalled[0] = idma->idma_stalled[1] = 0; } /** * t4_idma_monitor - monitor SGE Ingress DMA state * @adapter: the adapter * @idma: the adapter IDMA Monitor state * @hz: number of ticks/second * @ticks: number of ticks since the last IDMA Monitor call */ void t4_idma_monitor(struct adapter *adapter, struct sge_idma_monitor_state *idma, int hz, int ticks) { int i, idma_same_state_cnt[2]; /* Read the SGE Debug Ingress DMA Same State Count registers. These * are counters inside the SGE which count up on each clock when the * SGE finds its Ingress DMA State Engines in the same states they * were in the previous clock. The counters will peg out at * 0xffffffff without wrapping around so once they pass the 1s * threshold they'll stay above that till the IDMA state changes. 
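 *
 * For example, with a 600MHz Core Clock, idma_1s_thresh (set in
 * t4_idma_monitor_init() to core_ticks_per_usec() * 1000000) is
 * 600,000,000 ticks, i.e. one full second spent in the same state.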
*/ t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13); idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH); idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW); for (i = 0; i < 2; i++) { u32 debug0, debug11; /* If the Ingress DMA Same State Counter ("timer") is less * than 1s, then we can reset our synthesized Stall Timer and * continue. If we have previously emitted warnings about a * potential stalled Ingress Queue, issue a note indicating * that the Ingress Queue has resumed forward progress. */ if (idma_same_state_cnt[i] < idma->idma_1s_thresh) { if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz) CH_WARN(adapter, "SGE idma%d, queue %u, " "resumed after %d seconds\n", i, idma->idma_qid[i], idma->idma_stalled[i]/hz); idma->idma_stalled[i] = 0; continue; } /* Synthesize an SGE Ingress DMA Same State Timer in the Hz * domain. The first time we get here it'll be because we * passed the 1s Threshold; each additional time it'll be * because the RX Timer Callback is being fired on its regular * schedule. * * If the stall is below our Potential Hung Ingress Queue * Warning Threshold, continue. */ if (idma->idma_stalled[i] == 0) { idma->idma_stalled[i] = hz; idma->idma_warn[i] = 0; } else { idma->idma_stalled[i] += ticks; idma->idma_warn[i] -= ticks; } if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz) continue; /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds. */ if (idma->idma_warn[i] > 0) continue; idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz; /* Read and save the SGE IDMA State and Queue ID information. * We do this every time in case it changes across time ... * can't be too careful ... */ t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0); debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW); idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f; t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11); debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW); idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff; CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in " " state %u for %d seconds (debug0=%#x, debug11=%#x)\n", i, idma->idma_qid[i], idma->idma_state[i], idma->idma_stalled[i]/hz, debug0, debug11); t4_sge_decode_idma_state(adapter, idma->idma_state[i]); } } /** * t4_read_pace_tbl - read the pace table * @adap: the adapter * @pace_vals: holds the returned values * * Returns the values of TP's pace table in microseconds. */ void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) { unsigned int i, v; for (i = 0; i < NTX_SCHED; i++) { t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); v = t4_read_reg(adap, A_TP_PACE_TABLE); pace_vals[i] = dack_ticks_to_usec(adap, v); } } /** * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler * @adap: the adapter * @sched: the scheduler index * @kbps: the byte rate in Kbps * @ipg: the interpacket delay in tenths of nanoseconds * * Return the current configuration of a HW Tx scheduler. 
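 *
 * The conversion below is bytes-per-tick (bpt) times ticks-per-second
 * (core clock / cpt), divided by 125 to turn bytes/s into Kbps. For
 * instance, with a 50MHz core clock (cclk = 50000), bpt = 64 and
 * cpt = 100: 500000 ticks/s * 64 bytes / 125 = 256000 Kbps.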
*/ void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, unsigned int *ipg, bool sleep_ok) { unsigned int v, addr, bpt, cpt; if (kbps) { addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); if (sched & 1) v >>= 16; bpt = (v >> 8) & 0xff; cpt = v & 0xff; if (!cpt) *kbps = 0; /* scheduler disabled */ else { v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ *kbps = (v * bpt) / 125; } } if (ipg) { addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); if (sched & 1) v >>= 16; v &= 0xffff; *ipg = (10000 * v) / core_ticks_per_usec(adap); } } /** * t4_load_cfg - download config file * @adap: the adapter * @cfg_data: the cfg text file to write * @size: text file size * * Write the supplied config text file to the card's serial flash. */ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) { int ret, i, n, cfg_addr; unsigned int addr; unsigned int flash_cfg_start_sec; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; cfg_addr = t4_flash_cfg_addr(adap); if (cfg_addr < 0) return cfg_addr; addr = cfg_addr; flash_cfg_start_sec = addr / SF_SEC_SIZE; if (size > FLASH_CFG_MAX_SIZE) { CH_ERR(adap, "cfg file too large, max is %u bytes\n", FLASH_CFG_MAX_SIZE); return -EFBIG; } i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ sf_sec_size); ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, flash_cfg_start_sec + i - 1); /* * If size == 0 then we're simply erasing the FLASH sectors associated * with the on-adapter Firmware Configuration File. */ if (ret || size == 0) goto out; /* this will write to the flash up to SF_PAGE_SIZE at a time */ for (i = 0; i< size; i+= SF_PAGE_SIZE) { if ( (size - i) < SF_PAGE_SIZE) n = size - i; else n = SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, n, cfg_data, 1); if (ret) goto out; addr += SF_PAGE_SIZE; cfg_data += SF_PAGE_SIZE; } out: if (ret) CH_ERR(adap, "config file %s failed %d\n", (size == 0 ? "clear" : "download"), ret); return ret; } /** * t5_fw_init_extern_mem - initialize the external memory * @adap: the adapter * * Initializes the external memory on T5. */ int t5_fw_init_extern_mem(struct adapter *adap) { u32 params[1], val[1]; int ret; if (!is_t5(adap)) return 0; val[0] = 0xff; /* Initialize all MCs */ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT)); ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val, FW_CMD_MAX_TIMEOUT); return ret; } /* BIOS boot headers */ typedef struct pci_expansion_rom_header { u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ u8 reserved[22]; /* Reserved per processor Architecture data */ u8 pcir_offset[2]; /* Offset to PCI Data Structure */ } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */ /* Legacy PCI Expansion ROM Header */ typedef struct legacy_pci_expansion_rom_header { u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ u8 size512; /* Current Image Size in units of 512 bytes */ u8 initentry_point[4]; u8 cksum; /* Checksum computed on the entire Image */ u8 reserved[16]; /* Reserved */ u8 pcir_offset[2]; /* Offset to PCI Data Struture */ } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */ /* EFI PCI Expansion ROM Header */ typedef struct efi_pci_expansion_rom_header { u8 signature[2]; // ROM signature. The value 0xaa55 u8 initialization_size[2]; /* Units 512. Includes this header */ u8 efi_signature[4]; /* Signature from EFI image header. 
0x0EF1 */ u8 efi_subsystem[2]; /* Subsystem value for EFI image header */ u8 efi_machine_type[2]; /* Machine type from EFI image header */ u8 compression_type[2]; /* Compression type. */ /* * Compression type definition * 0x0: uncompressed * 0x1: Compressed * 0x2-0xFFFF: Reserved */ u8 reserved[8]; /* Reserved */ u8 efi_image_header_offset[2]; /* Offset to EFI Image */ u8 pcir_offset[2]; /* Offset to PCI Data Structure */ } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */ /* PCI Data Structure Format */ typedef struct pcir_data_structure { /* PCI Data Structure */ u8 signature[4]; /* Signature. The string "PCIR" */ u8 vendor_id[2]; /* Vendor Identification */ u8 device_id[2]; /* Device Identification */ u8 vital_product[2]; /* Pointer to Vital Product Data */ u8 length[2]; /* PCIR Data Structure Length */ u8 revision; /* PCIR Data Structure Revision */ u8 class_code[3]; /* Class Code */ u8 image_length[2]; /* Image Length. Multiple of 512B */ u8 code_revision[2]; /* Revision Level of Code/Data */ u8 code_type; /* Code Type. */ /* * PCI Expansion ROM Code Types * 0x00: Intel IA-32, PC-AT compatible. Legacy * 0x01: Open Firmware standard for PCI. FCODE * 0x02: Hewlett-Packard PA RISC. HP reserved * 0x03: EFI Image. EFI * 0x04-0xFF: Reserved. */ u8 indicator; /* Indicator. Identifies the last image in the ROM */ u8 reserved[2]; /* Reserved */ } pcir_data_t; /* PCI__DATA_STRUCTURE */ /* BOOT constants */ enum { BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */ BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */ BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */ BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */ BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */ VENDOR_ID = 0x1425, /* Vendor ID */ PCIR_SIGNATURE = 0x52494350 /* PCIR signature */ }; /* * modify_device_id - Modifies the device ID of the Boot BIOS image * @device_id: the device ID to write. * @boot_data: the boot image to modify. * * Write the supplied device ID to the boot BIOS image. */ static void modify_device_id(int device_id, u8 *boot_data) { legacy_pci_exp_rom_header_t *header; pcir_data_t *pcir_header; u32 cur_header = 0; /* * Loop through all chained images and change the device IDs */ while (1) { header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header]; pcir_header = (pcir_data_t *) &boot_data[cur_header + le16_to_cpu(*(u16*)header->pcir_offset)]; /* * Only modify the Device ID if code type is Legacy or HP. * 0x00: Okay to modify * 0x01: FCODE. Do not modify * 0x03: Okay to modify * 0x04-0xFF: Do not modify */ if (pcir_header->code_type == 0x00) { u8 csum = 0; int i; /* * Modify Device ID to match current adapter */ *(u16*) pcir_header->device_id = device_id; /* * Set checksum temporarily to 0. * We will recalculate it later. */ header->cksum = 0x0; /* * Calculate and update checksum */ for (i = 0; i < (header->size512 * 512); i++) csum += (u8)boot_data[cur_header + i]; /* * Invert summed value to create the checksum * Writing new checksum value directly to the boot data */ boot_data[cur_header + 7] = -csum; } else if (pcir_header->code_type == 0x03) { /* * Modify Device ID to match current adapter */ *(u16*) pcir_header->device_id = device_id; } /* * Check indicator element to identify if this is the last * image in the ROM. */ if (pcir_header->indicator & 0x80) break; /* * Move header pointer up to the next image in the ROM.
*/ cur_header += header->size512 * 512; } } /* * t4_load_boot - download boot flash * @adapter: the adapter * @boot_data: the boot image to write * @boot_addr: offset in flash to write boot_data * @size: image size * * Write the supplied boot image to the card's serial flash. * The boot image has the following sections: a 28-byte header and the * boot image. */ int t4_load_boot(struct adapter *adap, u8 *boot_data, unsigned int boot_addr, unsigned int size) { pci_exp_rom_header_t *header; int pcir_offset ; pcir_data_t *pcir_header; int ret, addr; uint16_t device_id; unsigned int i; unsigned int boot_sector = (boot_addr * 1024 ); unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; /* * Make sure the boot image does not encroach on the firmware region */ if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) { CH_ERR(adap, "boot image encroaching on firmware region\n"); return -EFBIG; } /* * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot, * and Boot configuration data sections. These 3 boot sections span * sectors 0 to 7 in flash and live right before the FW image location. */ i = DIV_ROUND_UP(size ? size : FLASH_FW_START, sf_sec_size); ret = t4_flash_erase_sectors(adap, boot_sector >> 16, (boot_sector >> 16) + i - 1); /* * If size == 0 then we're simply erasing the FLASH sectors associated * with the on-adapter option ROM file */ if (ret || (size == 0)) goto out; /* Get boot header */ header = (pci_exp_rom_header_t *)boot_data; pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset); /* PCIR Data Structure */ pcir_header = (pcir_data_t *) &boot_data[pcir_offset]; /* * Perform some primitive sanity testing to avoid accidentally * writing garbage over the boot sectors. We ought to check for * more but it's not worth it for now ... */ if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { CH_ERR(adap, "boot image too small/large\n"); return -EFBIG; } #ifndef CHELSIO_T4_DIAGS /* * Check BOOT ROM header signature */ if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) { CH_ERR(adap, "Boot image missing signature\n"); return -EINVAL; } /* * Check PCI header signature */ if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) { CH_ERR(adap, "PCI header missing signature\n"); return -EINVAL; } /* * Check Vendor ID matches Chelsio ID */ if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) { CH_ERR(adap, "Vendor ID missing signature\n"); return -EINVAL; } #endif /* * Retrieve adapter's device ID */ t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id); /* Want to deal with PF 0 so I strip off PF 4 indicator */ device_id = device_id & 0xf0ff; /* * Check PCIE Device ID */ if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) { /* * Change the device ID in the Boot BIOS image to match * the Device ID of the current adapter. */ modify_device_id(device_id, boot_data); } /* * Skip over the first SF_PAGE_SIZE worth of data and write it after * we finish copying the rest of the boot image. This will ensure * that the BIOS boot header will only be written if the boot image * was written in full. 
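 *
 * (Writing the first page last is a standard flash-update safeguard:
 * until the header page lands, the image still lacks a valid 0xaa55
 * signature and will be ignored by the BIOS.)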
*/ addr = boot_sector; for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; boot_data += SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0); if (ret) goto out; } ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, (const u8 *)header, 0); out: if (ret) CH_ERR(adap, "boot image download failed, error %d\n", ret); return ret; } /* * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration * @adapter: the adapter * * Return the address within the flash where the OptionROM Configuration * is stored, or an error if the device FLASH is too small to contain * an OptionROM Configuration. */ static int t4_flash_bootcfg_addr(struct adapter *adapter) { /* * If the device FLASH isn't large enough to hold a Firmware * Configuration File, return an error. */ if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE) return -ENOSPC; return FLASH_BOOTCFG_START; } int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) { int ret, i, n, cfg_addr; unsigned int addr; unsigned int flash_cfg_start_sec; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; cfg_addr = t4_flash_bootcfg_addr(adap); if (cfg_addr < 0) return cfg_addr; addr = cfg_addr; flash_cfg_start_sec = addr / SF_SEC_SIZE; if (size > FLASH_BOOTCFG_MAX_SIZE) { CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", FLASH_BOOTCFG_MAX_SIZE); return -EFBIG; } i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */ sf_sec_size); ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, flash_cfg_start_sec + i - 1); /* * If size == 0 then we're simply erasing the FLASH sectors associated * with the on-adapter OptionROM Configuration File. */ if (ret || size == 0) goto out; /* this will write to the flash up to SF_PAGE_SIZE at a time */ for (i = 0; i< size; i+= SF_PAGE_SIZE) { if ( (size - i) < SF_PAGE_SIZE) n = size - i; else n = SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, n, cfg_data, 0); if (ret) goto out; addr += SF_PAGE_SIZE; cfg_data += SF_PAGE_SIZE; } out: if (ret) CH_ERR(adap, "boot config data %s failed %d\n", (size == 0 ? "clear" : "download"), ret); return ret; } /** * t4_set_filter_mode - configure the optional components of filter tuples * @adap: the adapter * @mode_map: a bitmap selecting which optional filter components to enable * @sleep_ok: if true we may sleep while awaiting command completion * * Sets the filter mode by selecting the optional components to enable * in filter tuples. Returns 0 on success and a negative error if the * requested mode needs more bits than are available for optional * components. */ int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map, bool sleep_ok) { static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 }; int i, nbits = 0; for (i = S_FCOE; i <= S_FRAGMENTATION; i++) if (mode_map & (1 << i)) nbits += width[i]; if (nbits > FILTER_OPT_LEN) return -EINVAL; t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok); read_filter_mode_and_ingress_config(adap, sleep_ok); return 0; } /** * t4_clr_port_stats - clear port statistics * @adap: the adapter * @idx: the port index * * Clear HW statistics for the given port.
*/ void t4_clr_port_stats(struct adapter *adap, int idx) { unsigned int i; u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map; u32 port_base_addr; if (is_t4(adap)) port_base_addr = PORT_BASE(idx); else port_base_addr = T5_PORT_BASE(idx); for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) t4_write_reg(adap, port_base_addr + i, 0); for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) t4_write_reg(adap, port_base_addr + i, 0); for (i = 0; i < 4; i++) if (bgmap & (1 << i)) { t4_write_reg(adap, A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); t4_write_reg(adap, A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); } } /** * t4_i2c_rd - read I2C data from adapter * @adap: the adapter * @port: Port number if per-port device; <0 if not * @devid: per-port device ID or absolute device ID * @offset: byte offset into device I2C space * @len: byte length of I2C space data * @buf: buffer in which to return I2C data * * Reads the I2C data from the indicated device and location. */ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf) { u32 ldst_addrspace; struct fw_ldst_cmd ldst; int ret; if (port >= 4 || devid >= 256 || offset >= 256 || len > sizeof ldst.u.i2c.data) return -EINVAL; memset(&ldst, 0, sizeof ldst); ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C); ldst.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | ldst_addrspace); ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); ldst.u.i2c.pid = (port < 0 ? 0xff : port); ldst.u.i2c.did = devid; ldst.u.i2c.boffset = offset; ldst.u.i2c.blen = len; ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); if (!ret) memcpy(buf, ldst.u.i2c.data, len); return ret; } /** * t4_i2c_wr - write I2C data to adapter * @adap: the adapter * @port: Port number if per-port device; <0 if not * @devid: per-port device ID or absolute device ID * @offset: byte offset into device I2C space * @len: byte length of I2C space data * @buf: buffer containing new I2C data * * Write the I2C data to the indicated device and location. */ int t4_i2c_wr(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf) { u32 ldst_addrspace; struct fw_ldst_cmd ldst; if (port >= 4 || devid >= 256 || offset >= 256 || len > sizeof ldst.u.i2c.data) return -EINVAL; memset(&ldst, 0, sizeof ldst); ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C); ldst.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | ldst_addrspace); ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); ldst.u.i2c.pid = (port < 0 ? 0xff : port); ldst.u.i2c.did = devid; ldst.u.i2c.boffset = offset; ldst.u.i2c.blen = len; memcpy(ldst.u.i2c.data, buf, len); return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); } /** * t4_sge_ctxt_rd - read an SGE context through FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @cid: the context id * @ctype: the context type * @data: where to store the context data * * Issues a FW command through the given mailbox to read an SGE context. 
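 *
 * A minimal usage sketch (the queue id is illustrative; six 32-bit
 * words come back, matching ctxt_data0..ctxt_data5 below):
 *
 *	u32 data[6];
 *	rc = -t4_sge_ctxt_rd(sc, sc->mbox, eq_id, CTXT_EGRESS, data);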
/**
 * t4_sge_ctxt_rd - read an SGE context through FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @cid: the context id
 * @ctype: the context type
 * @data: where to store the context data
 *
 * Issues a FW command through the given mailbox to read an SGE context.
 */
int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
    enum ctxt_type ctype, u32 *data)
{
	int ret;
	struct fw_ldst_cmd c;

	if (ctype == CTXT_EGRESS)
		ret = FW_LDST_ADDRSPC_SGE_EGRC;
	else if (ctype == CTXT_INGRESS)
		ret = FW_LDST_ADDRSPC_SGE_INGC;
	else if (ctype == CTXT_FLM)
		ret = FW_LDST_ADDRSPC_SGE_FLMC;
	else
		ret = FW_LDST_ADDRSPC_SGE_CONMC;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ |
	    V_FW_LDST_CMD_ADDRSPACE(ret));
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.physid = cpu_to_be32(cid);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
	}
	return ret;
}

/**
 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
 * @adap: the adapter
 * @cid: the context id
 * @ctype: the context type
 * @data: where to store the context data
 *
 * Reads an SGE context directly, bypassing FW. This is only for
 * debugging when FW is unavailable.
 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
    enum ctxt_type ctype, u32 *data)
{
	int i, ret;

	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	if (!ret)
		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
			*data++ = t4_read_reg(adap, i);
	return ret;
}

int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
    int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
	cmd.u.config.type = type;
	cmd.u.config.minmaxen = minmaxen;

	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
	    NULL, sleep_ok);
}

int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
    int rateunit, int ratemode, int channel, int cl, int minrate, int maxrate,
    int weight, int pktsize, int burstsize, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);
	cmd.u.params.burstsize = cpu_to_be16(burstsize);

	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
	    NULL, sleep_ok);
}

int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
    unsigned int maxrate, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
	cmd.u.params.ch = channel;
	cmd.u.params.rate = ratemode;		/* REL or ABS */
	cmd.u.params.max = cpu_to_be32(maxrate);	/* % or kbps */

	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
	    NULL, sleep_ok);
}
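/*
 * Illustrative sketch, not part of this change: capping a traffic channel
 * with t4_sched_params_ch_rl() above. Channel 0 and 100000 kbps are
 * invented example values; with FW_SCHED_PARAMS_RATE_ABS the maxrate
 * argument is interpreted in kbps rather than as a percentage of the port.
 */
static int example_cap_channel(struct adapter *adap)
{
	return t4_sched_params_ch_rl(adap, 0, FW_SCHED_PARAMS_RATE_ABS,
	    100000, 1 /* sleep_ok */);
}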
int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
    int weight, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	if (weight < 0 || weight > 100)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	cmd.u.params.weight = cpu_to_be16(weight);

	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
	    NULL, sleep_ok);
}

int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
    int mode, unsigned int maxrate, int pktsize, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
	cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);

	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
	    NULL, sleep_ok);
}

/*
 * t4_config_watchdog - configure (enable/disable) a watchdog timer
 * @adapter: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @timeout: watchdog timeout in ms
 * @action: watchdog timer / action
 *
 * There are separate watchdog timers for each possible watchdog
 * action. Configure one of the watchdog timers by setting a non-zero
 * timeout. Disable a watchdog timer by using a timeout of zero.
 */
int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
    unsigned int pf, unsigned int vf,
    unsigned int timeout, unsigned int action)
{
	struct fw_watchdog_cmd wdog;
	unsigned int ticks;

	/*
	 * The watchdog command expects a timeout in units of 10ms so we need
	 * to convert it here (via rounding) and force a minimum of one 10ms
	 * "tick" if the timeout is non-zero but the conversion results in 0
	 * ticks.
	 */
	ticks = (timeout + 5) / 10;
	if (timeout && !ticks)
		ticks = 1;

	memset(&wdog, 0, sizeof wdog);
	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
	    F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE |
	    V_FW_PARAMS_CMD_PFN(pf) |
	    V_FW_PARAMS_CMD_VFN(vf));
	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
	wdog.timeout = cpu_to_be32(ticks);
	wdog.action = cpu_to_be32(action);

	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
}
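/*
 * Illustrative sketch, not part of this change: just the ms-to-tick
 * rounding rule used by t4_config_watchdog() above, as a standalone
 * program. The function name is illustrative.
 */
#include <stdio.h>

static unsigned int ms_to_wdog_ticks(unsigned int timeout_ms)
{
	unsigned int ticks = (timeout_ms + 5) / 10;	/* round to 10ms units */

	if (timeout_ms && !ticks)
		ticks = 1;	/* e.g. 4ms would round to 0; force one tick */
	return ticks;
}

int main(void)
{
	/* prints: 0 1 1 2 */
	printf("%u %u %u %u\n", ms_to_wdog_ticks(0), ms_to_wdog_ticks(4),
	    ms_to_wdog_ticks(5), ms_to_wdog_ticks(24));
	return 0;
}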
int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
{
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
	    sizeof(devlog_cmd), &devlog_cmd);
	if (ret)
		return ret;

	*level = devlog_cmd.level;
	return 0;
}

int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
{
	struct fw_devlog_cmd devlog_cmd;

	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	devlog_cmd.level = level;
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
	    sizeof(devlog_cmd), &devlog_cmd);
}

Index: stable/11/sys/dev/cxgbe/firmware/t4fw_interface.h
===================================================================
--- stable/11/sys/dev/cxgbe/firmware/t4fw_interface.h	(revision 346966)
+++ stable/11/sys/dev/cxgbe/firmware/t4fw_interface.h	(revision 346967)
@@ -1,9903 +1,9923 @@
/*-
 * Copyright (c) 2012-2017 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef _T4FW_INTERFACE_H_
#define _T4FW_INTERFACE_H_

/******************************************************************************
 *   R E T U R N   V A L U E S
 ********************************/

enum fw_retval {
	FW_SUCCESS		= 0,	/* completed successfully */
	FW_EPERM		= 1,	/* operation not permitted */
	FW_ENOENT		= 2,	/* no such file or directory */
	FW_EIO			= 5,	/* input/output error; hw bad */
	FW_ENOEXEC		= 8,	/* exec format error; inv microcode */
	FW_EAGAIN		= 11,	/* try again */
	FW_ENOMEM		= 12,	/* out of memory */
	FW_EFAULT		= 14,	/* bad address; fw bad */
	FW_EBUSY		= 16,	/* resource busy */
	FW_EEXIST		= 17,	/* file exists */
	FW_ENODEV		= 19,	/* no such device */
	FW_EINVAL		= 22,	/* invalid argument */
	FW_ENOSPC		= 28,	/* no space left on device */
	FW_ENOSYS		= 38,	/* functionality not implemented */
	FW_ENODATA		= 61,	/* no data available */
	FW_EPROTO		= 71,	/* protocol error */
	FW_EADDRINUSE		= 98,	/* address already in use */
	FW_EADDRNOTAVAIL	= 99,	/* cannot assign requested address */
	FW_ENETDOWN		= 100,	/* network is down */
	FW_ENETUNREACH		= 101,	/* network is unreachable */
	FW_ENOBUFS		= 105,	/* no buffer space available */
	FW_ETIMEDOUT		= 110,	/* timeout */
	FW_EINPROGRESS		= 115,	/* fw internal */
	FW_SCSI_ABORT_REQUESTED	= 128,	/* */
	FW_SCSI_ABORT_TIMEDOUT	= 129,	/* */
	FW_SCSI_ABORTED		= 130,	/* */
	FW_SCSI_CLOSE_REQUESTED	= 131,	/* */
	FW_ERR_LINK_DOWN	= 132,	/* */
	FW_RDEV_NOT_READY	= 133,	/* */
	FW_ERR_RDEV_LOST	= 134,	/* */
	FW_ERR_RDEV_LOGO	= 135,	/* */
	FW_FCOE_NO_XCHG		= 136,	/* */
	FW_SCSI_RSP_ERR		= 137,	/* */
	FW_ERR_RDEV_IMPL_LOGO	= 138,	/* */
	FW_SCSI_UNDER_FLOW_ERR	= 139,	/* */
	FW_SCSI_OVER_FLOW_ERR	= 140,	/* */
	FW_SCSI_DDP_ERR		= 141,	/* DDP error */
	FW_SCSI_TASK_ERR	= 142,	/* No SCSI tasks available */
	FW_SCSI_IO_BLOCK	= 143,	/* IO is going to be blocked due to resource failure */
};

/******************************************************************************
 *   M E M O R Y   T Y P E s
 ******************************/

enum fw_memtype {
	FW_MEMTYPE_EDC0		= 0x0,
	FW_MEMTYPE_EDC1		= 0x1,
	FW_MEMTYPE_EXTMEM	= 0x2,
	FW_MEMTYPE_FLASH	= 0x4,
	FW_MEMTYPE_INTERNAL	= 0x5,
	FW_MEMTYPE_EXTMEM1	= 0x6,
	FW_MEMTYPE_HMA		= 0x7,
};

/******************************************************************************
 *   W O R K   R E Q U E S T s
 ********************************/

enum fw_wr_opcodes {
	FW_FRAG_WR		= 0x1d,
	FW_FILTER_WR		= 0x02,
	FW_ULPTX_WR		= 0x04,
	FW_TP_WR		= 0x05,
	FW_ETH_TX_PKT_WR	= 0x08,
	FW_ETH_TX_PKT2_WR	= 0x44,
	FW_ETH_TX_PKTS_WR	= 0x09,
	FW_ETH_TX_PKTS2_WR	= 0x78,
	FW_ETH_TX_EO_WR		= 0x1c,
	FW_EQ_FLUSH_WR		= 0x1b,
	FW_OFLD_CONNECTION_WR	= 0x2f,
	FW_FLOWC_WR		= 0x0a,
	FW_OFLD_TX_DATA_WR	= 0x0b,
	FW_CMD_WR		= 0x10,
	FW_ETH_TX_PKT_VM_WR	= 0x11,
	FW_ETH_TX_PKTS_VM_WR	= 0x12,
	FW_RI_RES_WR		= 0x0c,
	FW_RI_RDMA_WRITE_WR	= 0x14,
	FW_RI_SEND_WR		= 0x15,
	FW_RI_RDMA_READ_WR	= 0x16,
	FW_RI_RECV_WR		= 0x17,
	FW_RI_BIND_MW_WR	= 0x18,
	FW_RI_FR_NSMR_WR	= 0x19,
	FW_RI_FR_NSMR_TPTE_WR	= 0x20,
	FW_RI_RDMA_WRITE_CMPL_WR = 0x21,
	FW_RI_INV_LSTAG_WR	= 0x1a,
	FW_RI_SEND_IMMEDIATE_WR	= 0x15,
	FW_RI_ATOMIC_WR		= 0x16,
	FW_RI_WR		= 0x0d,
	FW_CHNET_IFCONF_WR	= 0x6b,
	FW_RDEV_WR		= 0x38,
	FW_FOISCSI_NODE_WR	= 0x60,
	FW_FOISCSI_CTRL_WR	= 0x6a,
	FW_FOISCSI_CHAP_WR	= 0x6c,
	FW_FCOE_ELS_CT_WR	= 0x30,
	FW_SCSI_WRITE_WR	= 0x31,
	FW_SCSI_READ_WR		= 0x32,
	FW_SCSI_CMD_WR		= 0x33,
	FW_SCSI_ABRT_CLS_WR	= 0x34,
	FW_SCSI_TGT_ACC_WR	= 0x35,
	FW_SCSI_TGT_XMIT_WR	= 0x36,
	FW_SCSI_TGT_RSP_WR	= 0x37,
	FW_POFCOE_TCB_WR	= 0x42,
	FW_POFCOE_ULPTX_WR	= 0x43,
	FW_ISCSI_TX_DATA_WR	= 0x45,
	FW_PTP_TX_PKT_WR	= 0x46,
	FW_TLSTX_DATA_WR	= 0x68,
	FW_CRYPTO_LOOKASIDE_WR	= 0x6d,
	FW_COISCSI_TGT_WR	= 0x70,
FW_COISCSI_TGT_CONN_WR = 0x71, FW_COISCSI_TGT_XMIT_WR = 0x72, FW_COISCSI_STATS_WR = 0x73, FW_ISNS_WR = 0x75, FW_ISNS_XMIT_WR = 0x76, FW_FILTER2_WR = 0x77, FW_LASTC2E_WR = 0x80 }; /* * Generic work request header flit0 */ struct fw_wr_hdr { __be32 hi; __be32 lo; }; /* work request opcode (hi) */ #define S_FW_WR_OP 24 #define M_FW_WR_OP 0xff #define V_FW_WR_OP(x) ((x) << S_FW_WR_OP) #define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP) /* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */ #define S_FW_WR_ATOMIC 23 #define M_FW_WR_ATOMIC 0x1 #define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC) #define G_FW_WR_ATOMIC(x) \ (((x) >> S_FW_WR_ATOMIC) & M_FW_WR_ATOMIC) #define F_FW_WR_ATOMIC V_FW_WR_ATOMIC(1U) /* flush flag (hi) - firmware flushes flushable work request buffered * in the flow context. */ #define S_FW_WR_FLUSH 22 #define M_FW_WR_FLUSH 0x1 #define V_FW_WR_FLUSH(x) ((x) << S_FW_WR_FLUSH) #define G_FW_WR_FLUSH(x) \ (((x) >> S_FW_WR_FLUSH) & M_FW_WR_FLUSH) #define F_FW_WR_FLUSH V_FW_WR_FLUSH(1U) /* completion flag (hi) - firmware generates a cpl_fw6_ack */ #define S_FW_WR_COMPL 21 #define M_FW_WR_COMPL 0x1 #define V_FW_WR_COMPL(x) ((x) << S_FW_WR_COMPL) #define G_FW_WR_COMPL(x) \ (((x) >> S_FW_WR_COMPL) & M_FW_WR_COMPL) #define F_FW_WR_COMPL V_FW_WR_COMPL(1U) /* work request immediate data lengh (hi) */ #define S_FW_WR_IMMDLEN 0 #define M_FW_WR_IMMDLEN 0xff #define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN) #define G_FW_WR_IMMDLEN(x) \ (((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN) /* egress queue status update to associated ingress queue entry (lo) */ #define S_FW_WR_EQUIQ 31 #define M_FW_WR_EQUIQ 0x1 #define V_FW_WR_EQUIQ(x) ((x) << S_FW_WR_EQUIQ) #define G_FW_WR_EQUIQ(x) (((x) >> S_FW_WR_EQUIQ) & M_FW_WR_EQUIQ) #define F_FW_WR_EQUIQ V_FW_WR_EQUIQ(1U) /* egress queue status update to egress queue status entry (lo) */ #define S_FW_WR_EQUEQ 30 #define M_FW_WR_EQUEQ 0x1 #define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ) #define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ) #define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U) /* flow context identifier (lo) */ #define S_FW_WR_FLOWID 8 #define M_FW_WR_FLOWID 0xfffff #define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID) #define G_FW_WR_FLOWID(x) (((x) >> S_FW_WR_FLOWID) & M_FW_WR_FLOWID) /* length in units of 16-bytes (lo) */ #define S_FW_WR_LEN16 0 #define M_FW_WR_LEN16 0xff #define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16) #define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16) struct fw_frag_wr { __be32 op_to_fragoff16; __be32 flowid_len16; __be64 r4; }; #define S_FW_FRAG_WR_EOF 15 #define M_FW_FRAG_WR_EOF 0x1 #define V_FW_FRAG_WR_EOF(x) ((x) << S_FW_FRAG_WR_EOF) #define G_FW_FRAG_WR_EOF(x) (((x) >> S_FW_FRAG_WR_EOF) & M_FW_FRAG_WR_EOF) #define F_FW_FRAG_WR_EOF V_FW_FRAG_WR_EOF(1U) #define S_FW_FRAG_WR_FRAGOFF16 8 #define M_FW_FRAG_WR_FRAGOFF16 0x7f #define V_FW_FRAG_WR_FRAGOFF16(x) ((x) << S_FW_FRAG_WR_FRAGOFF16) #define G_FW_FRAG_WR_FRAGOFF16(x) \ (((x) >> S_FW_FRAG_WR_FRAGOFF16) & M_FW_FRAG_WR_FRAGOFF16) /* valid filter configurations for compressed tuple * Encodings: TPL - Compressed TUPLE for filter in addition to 4-tuple * FR - FRAGMENT, FC - FCoE, MT - MPS MATCH TYPE, M - MPS MATCH, * E - Ethertype, P - Port, PR - Protocol, T - TOS, IV - Inner VLAN, * OV - Outer VLAN/VNIC_ID, */ #define HW_TPL_FR_MT_M_E_P_FC 0x3C3 #define HW_TPL_FR_MT_M_PR_T_FC 0x3B3 #define HW_TPL_FR_MT_M_IV_P_FC 0x38B #define HW_TPL_FR_MT_M_OV_P_FC 0x387 #define HW_TPL_FR_MT_E_PR_T 0x370 #define HW_TPL_FR_MT_E_PR_P_FC 0X363 #define 
HW_TPL_FR_MT_E_T_P_FC 0X353 #define HW_TPL_FR_MT_PR_IV_P_FC 0X32B #define HW_TPL_FR_MT_PR_OV_P_FC 0X327 #define HW_TPL_FR_MT_T_IV_P_FC 0X31B #define HW_TPL_FR_MT_T_OV_P_FC 0X317 #define HW_TPL_FR_M_E_PR_FC 0X2E1 #define HW_TPL_FR_M_E_T_FC 0X2D1 #define HW_TPL_FR_M_PR_IV_FC 0X2A9 #define HW_TPL_FR_M_PR_OV_FC 0X2A5 #define HW_TPL_FR_M_T_IV_FC 0X299 #define HW_TPL_FR_M_T_OV_FC 0X295 #define HW_TPL_FR_E_PR_T_P 0X272 #define HW_TPL_FR_E_PR_T_FC 0X271 #define HW_TPL_FR_E_IV_FC 0X249 #define HW_TPL_FR_E_OV_FC 0X245 #define HW_TPL_FR_PR_T_IV_FC 0X239 #define HW_TPL_FR_PR_T_OV_FC 0X235 #define HW_TPL_FR_IV_OV_FC 0X20D #define HW_TPL_MT_M_E_PR 0X1E0 #define HW_TPL_MT_M_E_T 0X1D0 #define HW_TPL_MT_E_PR_T_FC 0X171 #define HW_TPL_MT_E_IV 0X148 #define HW_TPL_MT_E_OV 0X144 #define HW_TPL_MT_PR_T_IV 0X138 #define HW_TPL_MT_PR_T_OV 0X134 #define HW_TPL_M_E_PR_P 0X0E2 #define HW_TPL_M_E_T_P 0X0D2 #define HW_TPL_E_PR_T_P_FC 0X073 #define HW_TPL_E_IV_P 0X04A #define HW_TPL_E_OV_P 0X046 #define HW_TPL_PR_T_IV_P 0X03A #define HW_TPL_PR_T_OV_P 0X036 /* filter wr reply code in cookie in CPL_SET_TCB_RPL */ enum fw_filter_wr_cookie { FW_FILTER_WR_SUCCESS, FW_FILTER_WR_FLT_ADDED, FW_FILTER_WR_FLT_DELETED, FW_FILTER_WR_SMT_TBL_FULL, FW_FILTER_WR_EINVAL, }; enum fw_filter_wr_nat_mode { FW_FILTER_WR_NATMODE_NONE = 0, FW_FILTER_WR_NATMODE_DIP , FW_FILTER_WR_NATMODE_DIPDP, FW_FILTER_WR_NATMODE_DIPDPSIP, FW_FILTER_WR_NATMODE_DIPDPSP, FW_FILTER_WR_NATMODE_SIPSP, FW_FILTER_WR_NATMODE_DIPSIPSP, FW_FILTER_WR_NATMODE_FOURTUPLE, }; struct fw_filter_wr { __be32 op_pkd; __be32 len16_pkd; __be64 r3; __be32 tid_to_iq; __be32 del_filter_to_l2tix; __be16 ethtype; __be16 ethtypem; __u8 frag_to_ovlan_vldm; __u8 smac_sel; __be16 rx_chan_rx_rpl_iq; __be32 maci_to_matchtypem; __u8 ptcl; __u8 ptclm; __u8 ttyp; __u8 ttypm; __be16 ivlan; __be16 ivlanm; __be16 ovlan; __be16 ovlanm; __u8 lip[16]; __u8 lipm[16]; __u8 fip[16]; __u8 fipm[16]; __be16 lp; __be16 lpm; __be16 fp; __be16 fpm; __be16 r7; __u8 sma[6]; }; struct fw_filter2_wr { __be32 op_pkd; __be32 len16_pkd; __be64 r3; __be32 tid_to_iq; __be32 del_filter_to_l2tix; __be16 ethtype; __be16 ethtypem; __u8 frag_to_ovlan_vldm; __u8 smac_sel; __be16 rx_chan_rx_rpl_iq; __be32 maci_to_matchtypem; __u8 ptcl; __u8 ptclm; __u8 ttyp; __u8 ttypm; __be16 ivlan; __be16 ivlanm; __be16 ovlan; __be16 ovlanm; __u8 lip[16]; __u8 lipm[16]; __u8 fip[16]; __u8 fipm[16]; __be16 lp; __be16 lpm; __be16 fp; __be16 fpm; __be16 r7; __u8 sma[6]; __be16 r8; __u8 filter_type_swapmac; __u8 natmode_to_ulp_type; __be16 newlport; __be16 newfport; __u8 newlip[16]; __u8 newfip[16]; __be32 natseqcheck; __be32 r9; __be64 r10; __be64 r11; __be64 r12; __be64 r13; }; #define S_FW_FILTER_WR_TID 12 #define M_FW_FILTER_WR_TID 0xfffff #define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID) #define G_FW_FILTER_WR_TID(x) \ (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID) #define S_FW_FILTER_WR_RQTYPE 11 #define M_FW_FILTER_WR_RQTYPE 0x1 #define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE) #define G_FW_FILTER_WR_RQTYPE(x) \ (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE) #define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U) #define S_FW_FILTER_WR_NOREPLY 10 #define M_FW_FILTER_WR_NOREPLY 0x1 #define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY) #define G_FW_FILTER_WR_NOREPLY(x) \ (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY) #define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U) #define S_FW_FILTER_WR_IQ 0 #define M_FW_FILTER_WR_IQ 0x3ff #define V_FW_FILTER_WR_IQ(x) ((x) << 
S_FW_FILTER_WR_IQ) #define G_FW_FILTER_WR_IQ(x) \ (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ) #define S_FW_FILTER_WR_DEL_FILTER 31 #define M_FW_FILTER_WR_DEL_FILTER 0x1 #define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER) #define G_FW_FILTER_WR_DEL_FILTER(x) \ (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER) #define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U) #define S_FW_FILTER2_WR_DROP_ENCAP 30 #define M_FW_FILTER2_WR_DROP_ENCAP 0x1 #define V_FW_FILTER2_WR_DROP_ENCAP(x) ((x) << S_FW_FILTER2_WR_DROP_ENCAP) #define G_FW_FILTER2_WR_DROP_ENCAP(x) \ (((x) >> S_FW_FILTER2_WR_DROP_ENCAP) & M_FW_FILTER2_WR_DROP_ENCAP) #define F_FW_FILTER2_WR_DROP_ENCAP V_FW_FILTER2_WR_DROP_ENCAP(1U) #define S_FW_FILTER2_WR_TX_LOOP 29 #define M_FW_FILTER2_WR_TX_LOOP 0x1 #define V_FW_FILTER2_WR_TX_LOOP(x) ((x) << S_FW_FILTER2_WR_TX_LOOP) #define G_FW_FILTER2_WR_TX_LOOP(x) \ (((x) >> S_FW_FILTER2_WR_TX_LOOP) & M_FW_FILTER2_WR_TX_LOOP) #define F_FW_FILTER2_WR_TX_LOOP V_FW_FILTER2_WR_TX_LOOP(1U) #define S_FW_FILTER_WR_RPTTID 25 #define M_FW_FILTER_WR_RPTTID 0x1 #define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID) #define G_FW_FILTER_WR_RPTTID(x) \ (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID) #define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U) #define S_FW_FILTER_WR_DROP 24 #define M_FW_FILTER_WR_DROP 0x1 #define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP) #define G_FW_FILTER_WR_DROP(x) \ (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP) #define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U) #define S_FW_FILTER_WR_DIRSTEER 23 #define M_FW_FILTER_WR_DIRSTEER 0x1 #define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER) #define G_FW_FILTER_WR_DIRSTEER(x) \ (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER) #define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U) #define S_FW_FILTER_WR_MASKHASH 22 #define M_FW_FILTER_WR_MASKHASH 0x1 #define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH) #define G_FW_FILTER_WR_MASKHASH(x) \ (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH) #define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U) #define S_FW_FILTER_WR_DIRSTEERHASH 21 #define M_FW_FILTER_WR_DIRSTEERHASH 0x1 #define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH) #define G_FW_FILTER_WR_DIRSTEERHASH(x) \ (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH) #define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U) #define S_FW_FILTER_WR_LPBK 20 #define M_FW_FILTER_WR_LPBK 0x1 #define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK) #define G_FW_FILTER_WR_LPBK(x) \ (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK) #define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U) #define S_FW_FILTER_WR_DMAC 19 #define M_FW_FILTER_WR_DMAC 0x1 #define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC) #define G_FW_FILTER_WR_DMAC(x) \ (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC) #define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U) #define S_FW_FILTER_WR_SMAC 18 #define M_FW_FILTER_WR_SMAC 0x1 #define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC) #define G_FW_FILTER_WR_SMAC(x) \ (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC) #define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U) #define S_FW_FILTER_WR_INSVLAN 17 #define M_FW_FILTER_WR_INSVLAN 0x1 #define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN) #define G_FW_FILTER_WR_INSVLAN(x) \ (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN) #define F_FW_FILTER_WR_INSVLAN 
V_FW_FILTER_WR_INSVLAN(1U) #define S_FW_FILTER_WR_RMVLAN 16 #define M_FW_FILTER_WR_RMVLAN 0x1 #define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN) #define G_FW_FILTER_WR_RMVLAN(x) \ (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN) #define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U) #define S_FW_FILTER_WR_HITCNTS 15 #define M_FW_FILTER_WR_HITCNTS 0x1 #define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS) #define G_FW_FILTER_WR_HITCNTS(x) \ (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS) #define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U) #define S_FW_FILTER_WR_TXCHAN 13 #define M_FW_FILTER_WR_TXCHAN 0x3 #define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN) #define G_FW_FILTER_WR_TXCHAN(x) \ (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN) #define S_FW_FILTER_WR_PRIO 12 #define M_FW_FILTER_WR_PRIO 0x1 #define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO) #define G_FW_FILTER_WR_PRIO(x) \ (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO) #define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U) #define S_FW_FILTER_WR_L2TIX 0 #define M_FW_FILTER_WR_L2TIX 0xfff #define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX) #define G_FW_FILTER_WR_L2TIX(x) \ (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX) #define S_FW_FILTER_WR_FRAG 7 #define M_FW_FILTER_WR_FRAG 0x1 #define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG) #define G_FW_FILTER_WR_FRAG(x) \ (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG) #define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U) #define S_FW_FILTER_WR_FRAGM 6 #define M_FW_FILTER_WR_FRAGM 0x1 #define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM) #define G_FW_FILTER_WR_FRAGM(x) \ (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM) #define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U) #define S_FW_FILTER_WR_IVLAN_VLD 5 #define M_FW_FILTER_WR_IVLAN_VLD 0x1 #define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD) #define G_FW_FILTER_WR_IVLAN_VLD(x) \ (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD) #define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U) #define S_FW_FILTER_WR_OVLAN_VLD 4 #define M_FW_FILTER_WR_OVLAN_VLD 0x1 #define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD) #define G_FW_FILTER_WR_OVLAN_VLD(x) \ (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD) #define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U) #define S_FW_FILTER_WR_IVLAN_VLDM 3 #define M_FW_FILTER_WR_IVLAN_VLDM 0x1 #define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM) #define G_FW_FILTER_WR_IVLAN_VLDM(x) \ (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM) #define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U) #define S_FW_FILTER_WR_OVLAN_VLDM 2 #define M_FW_FILTER_WR_OVLAN_VLDM 0x1 #define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM) #define G_FW_FILTER_WR_OVLAN_VLDM(x) \ (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM) #define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U) #define S_FW_FILTER_WR_RX_CHAN 15 #define M_FW_FILTER_WR_RX_CHAN 0x1 #define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN) #define G_FW_FILTER_WR_RX_CHAN(x) \ (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN) #define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U) #define S_FW_FILTER_WR_RX_RPL_IQ 0 #define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff #define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ) #define G_FW_FILTER_WR_RX_RPL_IQ(x) \ (((x) >> 
S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ) #define S_FW_FILTER2_WR_FILTER_TYPE 1 #define M_FW_FILTER2_WR_FILTER_TYPE 0x1 #define V_FW_FILTER2_WR_FILTER_TYPE(x) ((x) << S_FW_FILTER2_WR_FILTER_TYPE) #define G_FW_FILTER2_WR_FILTER_TYPE(x) \ (((x) >> S_FW_FILTER2_WR_FILTER_TYPE) & M_FW_FILTER2_WR_FILTER_TYPE) #define F_FW_FILTER2_WR_FILTER_TYPE V_FW_FILTER2_WR_FILTER_TYPE(1U) #define S_FW_FILTER2_WR_SWAPMAC 0 #define M_FW_FILTER2_WR_SWAPMAC 0x1 #define V_FW_FILTER2_WR_SWAPMAC(x) ((x) << S_FW_FILTER2_WR_SWAPMAC) #define G_FW_FILTER2_WR_SWAPMAC(x) \ (((x) >> S_FW_FILTER2_WR_SWAPMAC) & M_FW_FILTER2_WR_SWAPMAC) #define F_FW_FILTER2_WR_SWAPMAC V_FW_FILTER2_WR_SWAPMAC(1U) #define S_FW_FILTER2_WR_NATMODE 5 #define M_FW_FILTER2_WR_NATMODE 0x7 #define V_FW_FILTER2_WR_NATMODE(x) ((x) << S_FW_FILTER2_WR_NATMODE) #define G_FW_FILTER2_WR_NATMODE(x) \ (((x) >> S_FW_FILTER2_WR_NATMODE) & M_FW_FILTER2_WR_NATMODE) #define S_FW_FILTER2_WR_NATFLAGCHECK 4 #define M_FW_FILTER2_WR_NATFLAGCHECK 0x1 #define V_FW_FILTER2_WR_NATFLAGCHECK(x) ((x) << S_FW_FILTER2_WR_NATFLAGCHECK) #define G_FW_FILTER2_WR_NATFLAGCHECK(x) \ (((x) >> S_FW_FILTER2_WR_NATFLAGCHECK) & M_FW_FILTER2_WR_NATFLAGCHECK) #define F_FW_FILTER2_WR_NATFLAGCHECK V_FW_FILTER2_WR_NATFLAGCHECK(1U) #define S_FW_FILTER2_WR_ULP_TYPE 0 #define M_FW_FILTER2_WR_ULP_TYPE 0xf #define V_FW_FILTER2_WR_ULP_TYPE(x) ((x) << S_FW_FILTER2_WR_ULP_TYPE) #define G_FW_FILTER2_WR_ULP_TYPE(x) \ (((x) >> S_FW_FILTER2_WR_ULP_TYPE) & M_FW_FILTER2_WR_ULP_TYPE) #define S_FW_FILTER_WR_MACI 23 #define M_FW_FILTER_WR_MACI 0x1ff #define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI) #define G_FW_FILTER_WR_MACI(x) \ (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI) #define S_FW_FILTER_WR_MACIM 14 #define M_FW_FILTER_WR_MACIM 0x1ff #define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM) #define G_FW_FILTER_WR_MACIM(x) \ (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM) #define S_FW_FILTER_WR_FCOE 13 #define M_FW_FILTER_WR_FCOE 0x1 #define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE) #define G_FW_FILTER_WR_FCOE(x) \ (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE) #define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U) #define S_FW_FILTER_WR_FCOEM 12 #define M_FW_FILTER_WR_FCOEM 0x1 #define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM) #define G_FW_FILTER_WR_FCOEM(x) \ (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM) #define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U) #define S_FW_FILTER_WR_PORT 9 #define M_FW_FILTER_WR_PORT 0x7 #define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT) #define G_FW_FILTER_WR_PORT(x) \ (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT) #define S_FW_FILTER_WR_PORTM 6 #define M_FW_FILTER_WR_PORTM 0x7 #define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM) #define G_FW_FILTER_WR_PORTM(x) \ (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM) #define S_FW_FILTER_WR_MATCHTYPE 3 #define M_FW_FILTER_WR_MATCHTYPE 0x7 #define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE) #define G_FW_FILTER_WR_MATCHTYPE(x) \ (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE) #define S_FW_FILTER_WR_MATCHTYPEM 0 #define M_FW_FILTER_WR_MATCHTYPEM 0x7 #define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM) #define G_FW_FILTER_WR_MATCHTYPEM(x) \ (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM) struct fw_ulptx_wr { __be32 op_to_compl; __be32 flowid_len16; __u64 cookie; }; /* flag for packet type - control packet (0), data packet (1) */ #define S_FW_ULPTX_WR_DATA 28 
#define M_FW_ULPTX_WR_DATA 0x1 #define V_FW_ULPTX_WR_DATA(x) ((x) << S_FW_ULPTX_WR_DATA) #define G_FW_ULPTX_WR_DATA(x) \ (((x) >> S_FW_ULPTX_WR_DATA) & M_FW_ULPTX_WR_DATA) #define F_FW_ULPTX_WR_DATA V_FW_ULPTX_WR_DATA(1U) struct fw_tp_wr { __be32 op_to_immdlen; __be32 flowid_len16; __u64 cookie; }; struct fw_eth_tx_pkt_wr { __be32 op_immdlen; __be32 equiq_to_len16; __be64 r3; }; #define S_FW_ETH_TX_PKT_WR_IMMDLEN 0 #define M_FW_ETH_TX_PKT_WR_IMMDLEN 0x1ff #define V_FW_ETH_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT_WR_IMMDLEN) #define G_FW_ETH_TX_PKT_WR_IMMDLEN(x) \ (((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN) struct fw_eth_tx_pkt2_wr { __be32 op_immdlen; __be32 equiq_to_len16; __be32 r3; __be32 L4ChkDisable_to_IpHdrLen; }; #define S_FW_ETH_TX_PKT2_WR_IMMDLEN 0 #define M_FW_ETH_TX_PKT2_WR_IMMDLEN 0x1ff #define V_FW_ETH_TX_PKT2_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT2_WR_IMMDLEN) #define G_FW_ETH_TX_PKT2_WR_IMMDLEN(x) \ (((x) >> S_FW_ETH_TX_PKT2_WR_IMMDLEN) & M_FW_ETH_TX_PKT2_WR_IMMDLEN) #define S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE 31 #define M_FW_ETH_TX_PKT2_WR_L4CHKDISABLE 0x1 #define V_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(x) \ ((x) << S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE) #define G_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(x) \ (((x) >> S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE) & \ M_FW_ETH_TX_PKT2_WR_L4CHKDISABLE) #define F_FW_ETH_TX_PKT2_WR_L4CHKDISABLE \ V_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(1U) #define S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE 30 #define M_FW_ETH_TX_PKT2_WR_L3CHKDISABLE 0x1 #define V_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(x) \ ((x) << S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE) #define G_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(x) \ (((x) >> S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE) & \ M_FW_ETH_TX_PKT2_WR_L3CHKDISABLE) #define F_FW_ETH_TX_PKT2_WR_L3CHKDISABLE \ V_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(1U) #define S_FW_ETH_TX_PKT2_WR_IVLAN 28 #define M_FW_ETH_TX_PKT2_WR_IVLAN 0x1 #define V_FW_ETH_TX_PKT2_WR_IVLAN(x) ((x) << S_FW_ETH_TX_PKT2_WR_IVLAN) #define G_FW_ETH_TX_PKT2_WR_IVLAN(x) \ (((x) >> S_FW_ETH_TX_PKT2_WR_IVLAN) & M_FW_ETH_TX_PKT2_WR_IVLAN) #define F_FW_ETH_TX_PKT2_WR_IVLAN V_FW_ETH_TX_PKT2_WR_IVLAN(1U) #define S_FW_ETH_TX_PKT2_WR_IVLANTAG 12 #define M_FW_ETH_TX_PKT2_WR_IVLANTAG 0xffff #define V_FW_ETH_TX_PKT2_WR_IVLANTAG(x) ((x) << S_FW_ETH_TX_PKT2_WR_IVLANTAG) #define G_FW_ETH_TX_PKT2_WR_IVLANTAG(x) \ (((x) >> S_FW_ETH_TX_PKT2_WR_IVLANTAG) & M_FW_ETH_TX_PKT2_WR_IVLANTAG) #define S_FW_ETH_TX_PKT2_WR_CHKTYPE 8 #define M_FW_ETH_TX_PKT2_WR_CHKTYPE 0xf #define V_FW_ETH_TX_PKT2_WR_CHKTYPE(x) ((x) << S_FW_ETH_TX_PKT2_WR_CHKTYPE) #define G_FW_ETH_TX_PKT2_WR_CHKTYPE(x) \ (((x) >> S_FW_ETH_TX_PKT2_WR_CHKTYPE) & M_FW_ETH_TX_PKT2_WR_CHKTYPE) #define S_FW_ETH_TX_PKT2_WR_IPHDRLEN 0 #define M_FW_ETH_TX_PKT2_WR_IPHDRLEN 0xff #define V_FW_ETH_TX_PKT2_WR_IPHDRLEN(x) ((x) << S_FW_ETH_TX_PKT2_WR_IPHDRLEN) #define G_FW_ETH_TX_PKT2_WR_IPHDRLEN(x) \ (((x) >> S_FW_ETH_TX_PKT2_WR_IPHDRLEN) & M_FW_ETH_TX_PKT2_WR_IPHDRLEN) struct fw_eth_tx_pkts_wr { __be32 op_pkd; __be32 equiq_to_len16; __be32 r3; __be16 plen; __u8 npkt; __u8 type; }; #define S_FW_PTP_TX_PKT_WR_IMMDLEN 0 #define M_FW_PTP_TX_PKT_WR_IMMDLEN 0x1ff #define V_FW_PTP_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_PTP_TX_PKT_WR_IMMDLEN) #define G_FW_PTP_TX_PKT_WR_IMMDLEN(x) \ (((x) >> S_FW_PTP_TX_PKT_WR_IMMDLEN) & M_FW_PTP_TX_PKT_WR_IMMDLEN) struct fw_eth_tx_pkt_ptp_wr { __be32 op_immdlen; __be32 equiq_to_len16; __be64 r3; }; enum fw_eth_tx_eo_type { FW_ETH_TX_EO_TYPE_UDPSEG, FW_ETH_TX_EO_TYPE_TCPSEG, FW_ETH_TX_EO_TYPE_NVGRESEG, FW_ETH_TX_EO_TYPE_VXLANSEG, FW_ETH_TX_EO_TYPE_GENEVESEG, }; struct 
fw_eth_tx_eo_wr { __be32 op_immdlen; __be32 equiq_to_len16; __be64 r3; union fw_eth_tx_eo { struct fw_eth_tx_eo_udpseg { __u8 type; __u8 ethlen; __be16 iplen; __u8 udplen; __u8 rtplen; __be16 r4; __be16 mss; __be16 schedpktsize; __be32 plen; } udpseg; struct fw_eth_tx_eo_tcpseg { __u8 type; __u8 ethlen; __be16 iplen; __u8 tcplen; __u8 tsclk_tsoff; __be16 r4; __be16 mss; __be16 r5; __be32 plen; } tcpseg; struct fw_eth_tx_eo_nvgreseg { __u8 type; __u8 iphdroffout; __be16 grehdroff; __be16 iphdroffin; __be16 tcphdroffin; __be16 mss; __be16 r4; __be32 plen; } nvgreseg; struct fw_eth_tx_eo_vxlanseg { __u8 type; __u8 iphdroffout; __be16 vxlanhdroff; __be16 iphdroffin; __be16 tcphdroffin; __be16 mss; __be16 r4; __be32 plen; } vxlanseg; struct fw_eth_tx_eo_geneveseg { __u8 type; __u8 iphdroffout; __be16 genevehdroff; __be16 iphdroffin; __be16 tcphdroffin; __be16 mss; __be16 r4; __be32 plen; } geneveseg; } u; }; #define S_FW_ETH_TX_EO_WR_IMMDLEN 0 #define M_FW_ETH_TX_EO_WR_IMMDLEN 0x1ff #define V_FW_ETH_TX_EO_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_EO_WR_IMMDLEN) #define G_FW_ETH_TX_EO_WR_IMMDLEN(x) \ (((x) >> S_FW_ETH_TX_EO_WR_IMMDLEN) & M_FW_ETH_TX_EO_WR_IMMDLEN) #define S_FW_ETH_TX_EO_WR_TSCLK 6 #define M_FW_ETH_TX_EO_WR_TSCLK 0x3 #define V_FW_ETH_TX_EO_WR_TSCLK(x) ((x) << S_FW_ETH_TX_EO_WR_TSCLK) #define G_FW_ETH_TX_EO_WR_TSCLK(x) \ (((x) >> S_FW_ETH_TX_EO_WR_TSCLK) & M_FW_ETH_TX_EO_WR_TSCLK) #define S_FW_ETH_TX_EO_WR_TSOFF 0 #define M_FW_ETH_TX_EO_WR_TSOFF 0x3f #define V_FW_ETH_TX_EO_WR_TSOFF(x) ((x) << S_FW_ETH_TX_EO_WR_TSOFF) #define G_FW_ETH_TX_EO_WR_TSOFF(x) \ (((x) >> S_FW_ETH_TX_EO_WR_TSOFF) & M_FW_ETH_TX_EO_WR_TSOFF) struct fw_eq_flush_wr { __u8 opcode; __u8 r1[3]; __be32 equiq_to_len16; __be64 r3; }; struct fw_ofld_connection_wr { __be32 op_compl; __be32 len16_pkd; __u64 cookie; __be64 r2; __be64 r3; struct fw_ofld_connection_le { __be32 version_cpl; __be32 filter; __be32 r1; __be16 lport; __be16 pport; union fw_ofld_connection_leip { struct fw_ofld_connection_le_ipv4 { __be32 pip; __be32 lip; __be64 r0; __be64 r1; __be64 r2; } ipv4; struct fw_ofld_connection_le_ipv6 { __be64 pip_hi; __be64 pip_lo; __be64 lip_hi; __be64 lip_lo; } ipv6; } u; } le; struct fw_ofld_connection_tcb { __be32 t_state_to_astid; __be16 cplrxdataack_cplpassacceptrpl; __be16 rcv_adv; __be32 rcv_nxt; __be32 tx_max; __be64 opt0; __be32 opt2; __be32 r1; __be64 r2; __be64 r3; } tcb; }; #define S_FW_OFLD_CONNECTION_WR_VERSION 31 #define M_FW_OFLD_CONNECTION_WR_VERSION 0x1 #define V_FW_OFLD_CONNECTION_WR_VERSION(x) \ ((x) << S_FW_OFLD_CONNECTION_WR_VERSION) #define G_FW_OFLD_CONNECTION_WR_VERSION(x) \ (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \ M_FW_OFLD_CONNECTION_WR_VERSION) #define F_FW_OFLD_CONNECTION_WR_VERSION V_FW_OFLD_CONNECTION_WR_VERSION(1U) #define S_FW_OFLD_CONNECTION_WR_CPL 30 #define M_FW_OFLD_CONNECTION_WR_CPL 0x1 #define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL) #define G_FW_OFLD_CONNECTION_WR_CPL(x) \ (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL) #define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U) #define S_FW_OFLD_CONNECTION_WR_T_STATE 28 #define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf #define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \ ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE) #define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \ (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \ M_FW_OFLD_CONNECTION_WR_T_STATE) #define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24 #define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf #define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ ((x) << 
S_FW_OFLD_CONNECTION_WR_RCV_SCALE) #define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \ M_FW_OFLD_CONNECTION_WR_RCV_SCALE) #define S_FW_OFLD_CONNECTION_WR_ASTID 0 #define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff #define V_FW_OFLD_CONNECTION_WR_ASTID(x) \ ((x) << S_FW_OFLD_CONNECTION_WR_ASTID) #define G_FW_OFLD_CONNECTION_WR_ASTID(x) \ (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID) #define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15 #define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1 #define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) #define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \ M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) #define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \ V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U) #define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14 #define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1 #define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) #define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \ M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) #define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \ V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U) enum fw_flowc_mnem_tcpstate { FW_FLOWC_MNEM_TCPSTATE_CLOSED = 0, /* illegal */ FW_FLOWC_MNEM_TCPSTATE_LISTEN = 1, /* illegal */ FW_FLOWC_MNEM_TCPSTATE_SYNSENT = 2, /* illegal */ FW_FLOWC_MNEM_TCPSTATE_SYNRECEIVED = 3, /* illegal */ FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED = 4, /* default */ FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT = 5, /* got peer close already */ FW_FLOWC_MNEM_TCPSTATE_FINWAIT1 = 6, /* haven't gotten ACK for FIN and * will resend FIN - equiv ESTAB */ FW_FLOWC_MNEM_TCPSTATE_CLOSING = 7, /* haven't gotten ACK for FIN and * will resend FIN but have * received FIN */ FW_FLOWC_MNEM_TCPSTATE_LASTACK = 8, /* haven't gotten ACK for FIN and * will resend FIN but have * received FIN */ FW_FLOWC_MNEM_TCPSTATE_FINWAIT2 = 9, /* sent FIN and got FIN + ACK, * waiting for FIN */ FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */ }; enum fw_flowc_mnem_eostate { FW_FLOWC_MNEM_EOSTATE_CLOSED = 0, /* illegal */ FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */ FW_FLOWC_MNEM_EOSTATE_CLOSING = 2, /* graceful close, after sending * outstanding payload */ FW_FLOWC_MNEM_EOSTATE_ABORTING = 3, /* immediate close, after * discarding outstanding payload */ }; enum fw_flowc_mnem { FW_FLOWC_MNEM_PFNVFN = 0, /* PFN [15:8] VFN [7:0] */ FW_FLOWC_MNEM_CH = 1, FW_FLOWC_MNEM_PORT = 2, FW_FLOWC_MNEM_IQID = 3, FW_FLOWC_MNEM_SNDNXT = 4, FW_FLOWC_MNEM_RCVNXT = 5, FW_FLOWC_MNEM_SNDBUF = 6, FW_FLOWC_MNEM_MSS = 7, FW_FLOWC_MNEM_TXDATAPLEN_MAX = 8, FW_FLOWC_MNEM_TCPSTATE = 9, FW_FLOWC_MNEM_EOSTATE = 10, FW_FLOWC_MNEM_SCHEDCLASS = 11, FW_FLOWC_MNEM_DCBPRIO = 12, FW_FLOWC_MNEM_SND_SCALE = 13, FW_FLOWC_MNEM_RCV_SCALE = 14, FW_FLOWC_MNEM_ULP_MODE = 15, FW_FLOWC_MNEM_MAX = 16, }; struct fw_flowc_mnemval { __u8 mnemonic; __u8 r4[3]; __be32 val; }; struct fw_flowc_wr { __be32 op_to_nparams; __be32 flowid_len16; #ifndef C99_NOT_SUPPORTED struct fw_flowc_mnemval mnemval[0]; #endif }; #define S_FW_FLOWC_WR_NPARAMS 0 #define M_FW_FLOWC_WR_NPARAMS 0xff #define V_FW_FLOWC_WR_NPARAMS(x) ((x) << S_FW_FLOWC_WR_NPARAMS) #define G_FW_FLOWC_WR_NPARAMS(x) \ (((x) >> S_FW_FLOWC_WR_NPARAMS) & M_FW_FLOWC_WR_NPARAMS) struct fw_ofld_tx_data_wr { __be32 op_to_immdlen; __be32 flowid_len16; __be32 plen; __be32 lsodisable_to_flags; }; #define 
S_FW_OFLD_TX_DATA_WR_LSODISABLE 31 #define M_FW_OFLD_TX_DATA_WR_LSODISABLE 0x1 #define V_FW_OFLD_TX_DATA_WR_LSODISABLE(x) \ ((x) << S_FW_OFLD_TX_DATA_WR_LSODISABLE) #define G_FW_OFLD_TX_DATA_WR_LSODISABLE(x) \ (((x) >> S_FW_OFLD_TX_DATA_WR_LSODISABLE) & \ M_FW_OFLD_TX_DATA_WR_LSODISABLE) #define F_FW_OFLD_TX_DATA_WR_LSODISABLE V_FW_OFLD_TX_DATA_WR_LSODISABLE(1U) #define S_FW_OFLD_TX_DATA_WR_ALIGNPLD 30 #define M_FW_OFLD_TX_DATA_WR_ALIGNPLD 0x1 #define V_FW_OFLD_TX_DATA_WR_ALIGNPLD(x) \ ((x) << S_FW_OFLD_TX_DATA_WR_ALIGNPLD) #define G_FW_OFLD_TX_DATA_WR_ALIGNPLD(x) \ (((x) >> S_FW_OFLD_TX_DATA_WR_ALIGNPLD) & M_FW_OFLD_TX_DATA_WR_ALIGNPLD) #define F_FW_OFLD_TX_DATA_WR_ALIGNPLD V_FW_OFLD_TX_DATA_WR_ALIGNPLD(1U) #define S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE 29 #define M_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE 0x1 #define V_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(x) \ ((x) << S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE) #define G_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(x) \ (((x) >> S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE) & \ M_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE) #define F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE \ V_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(1U) #define S_FW_OFLD_TX_DATA_WR_FLAGS 0 #define M_FW_OFLD_TX_DATA_WR_FLAGS 0xfffffff #define V_FW_OFLD_TX_DATA_WR_FLAGS(x) ((x) << S_FW_OFLD_TX_DATA_WR_FLAGS) #define G_FW_OFLD_TX_DATA_WR_FLAGS(x) \ (((x) >> S_FW_OFLD_TX_DATA_WR_FLAGS) & M_FW_OFLD_TX_DATA_WR_FLAGS) /* Use fw_ofld_tx_data_wr structure */ #define S_FW_ISCSI_TX_DATA_WR_FLAGS_HI 10 #define M_FW_ISCSI_TX_DATA_WR_FLAGS_HI 0x3fffff #define V_FW_ISCSI_TX_DATA_WR_FLAGS_HI(x) \ ((x) << S_FW_ISCSI_TX_DATA_WR_FLAGS_HI) #define G_FW_ISCSI_TX_DATA_WR_FLAGS_HI(x) \ (((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_HI) & M_FW_ISCSI_TX_DATA_WR_FLAGS_HI) #define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO 9 #define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO 0x1 #define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(x) \ ((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO) #define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(x) \ (((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO) & \ M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO) #define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO \ V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(1U) #define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI 8 #define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI 0x1 #define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(x) \ ((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI) #define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(x) \ (((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI) & \ M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI) #define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI \ V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(1U) #define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC 7 #define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC 0x1 #define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(x) \ ((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC) #define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(x) \ (((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC) & \ M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC) #define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC \ V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(1U) #define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC 6 #define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC 0x1 #define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(x) \ ((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC) #define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(x) \ (((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC) & \ M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC) #define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC \ V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(1U) #define S_FW_ISCSI_TX_DATA_WR_FLAGS_LO 0 #define M_FW_ISCSI_TX_DATA_WR_FLAGS_LO 0x3f #define V_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x) \ ((x) << 
S_FW_ISCSI_TX_DATA_WR_FLAGS_LO) #define G_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x) \ (((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_LO) & M_FW_ISCSI_TX_DATA_WR_FLAGS_LO) struct fw_cmd_wr { __be32 op_dma; __be32 len16_pkd; __be64 cookie_daddr; }; #define S_FW_CMD_WR_DMA 17 #define M_FW_CMD_WR_DMA 0x1 #define V_FW_CMD_WR_DMA(x) ((x) << S_FW_CMD_WR_DMA) #define G_FW_CMD_WR_DMA(x) (((x) >> S_FW_CMD_WR_DMA) & M_FW_CMD_WR_DMA) #define F_FW_CMD_WR_DMA V_FW_CMD_WR_DMA(1U) struct fw_eth_tx_pkt_vm_wr { __be32 op_immdlen; __be32 equiq_to_len16; __be32 r3[2]; __u8 ethmacdst[6]; __u8 ethmacsrc[6]; __be16 ethtype; __be16 vlantci; }; struct fw_eth_tx_pkts_vm_wr { __be32 op_pkd; __be32 equiq_to_len16; __be32 r3; __be16 plen; __u8 npkt; __u8 r4; __u8 ethmacdst[6]; __u8 ethmacsrc[6]; __be16 ethtype; __be16 vlantci; }; /****************************************************************************** * R I W O R K R E Q U E S T s **************************************/ enum fw_ri_wr_opcode { FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */ FW_RI_READ_REQ = 0x1, FW_RI_READ_RESP = 0x2, FW_RI_SEND = 0x3, FW_RI_SEND_WITH_INV = 0x4, FW_RI_SEND_WITH_SE = 0x5, FW_RI_SEND_WITH_SE_INV = 0x6, FW_RI_TERMINATE = 0x7, FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */ FW_RI_BIND_MW = 0x9, FW_RI_FAST_REGISTER = 0xa, FW_RI_LOCAL_INV = 0xb, FW_RI_QP_MODIFY = 0xc, FW_RI_BYPASS = 0xd, FW_RI_RECEIVE = 0xe, #if 0 FW_RI_SEND_IMMEDIATE = 0x8, FW_RI_SEND_IMMEDIATE_WITH_SE = 0x9, FW_RI_ATOMIC_REQUEST = 0xa, FW_RI_ATOMIC_RESPONSE = 0xb, FW_RI_BIND_MW = 0xc, /* CHELSIO RI specific ... */ FW_RI_FAST_REGISTER = 0xd, FW_RI_LOCAL_INV = 0xe, #endif FW_RI_SGE_EC_CR_RETURN = 0xf, FW_RI_WRITE_IMMEDIATE = FW_RI_RDMA_INIT, }; enum fw_ri_wr_flags { FW_RI_COMPLETION_FLAG = 0x01, FW_RI_NOTIFICATION_FLAG = 0x02, FW_RI_SOLICITED_EVENT_FLAG = 0x04, FW_RI_READ_FENCE_FLAG = 0x08, FW_RI_LOCAL_FENCE_FLAG = 0x10, FW_RI_RDMA_READ_INVALIDATE = 0x20, FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40 }; enum fw_ri_mpa_attrs { FW_RI_MPA_RX_MARKER_ENABLE = 0x01, FW_RI_MPA_TX_MARKER_ENABLE = 0x02, FW_RI_MPA_CRC_ENABLE = 0x04, FW_RI_MPA_IETF_ENABLE = 0x08 }; enum fw_ri_qp_caps { FW_RI_QP_RDMA_READ_ENABLE = 0x01, FW_RI_QP_RDMA_WRITE_ENABLE = 0x02, FW_RI_QP_BIND_ENABLE = 0x04, FW_RI_QP_FAST_REGISTER_ENABLE = 0x08, FW_RI_QP_STAG0_ENABLE = 0x10, FW_RI_QP_RDMA_READ_REQ_0B_ENABLE= 0x80, }; enum fw_ri_addr_type { FW_RI_ZERO_BASED_TO = 0x00, FW_RI_VA_BASED_TO = 0x01 }; enum fw_ri_mem_perms { FW_RI_MEM_ACCESS_REM_WRITE = 0x01, FW_RI_MEM_ACCESS_REM_READ = 0x02, FW_RI_MEM_ACCESS_REM = 0x03, FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04, FW_RI_MEM_ACCESS_LOCAL_READ = 0x08, FW_RI_MEM_ACCESS_LOCAL = 0x0C }; enum fw_ri_stag_type { FW_RI_STAG_NSMR = 0x00, FW_RI_STAG_SMR = 0x01, FW_RI_STAG_MW = 0x02, FW_RI_STAG_MW_RELAXED = 0x03 }; enum fw_ri_data_op { FW_RI_DATA_IMMD = 0x81, FW_RI_DATA_DSGL = 0x82, FW_RI_DATA_ISGL = 0x83 }; enum fw_ri_sgl_depth { FW_RI_SGL_DEPTH_MAX_SQ = 16, FW_RI_SGL_DEPTH_MAX_RQ = 4 }; enum fw_ri_cqe_err { FW_RI_CQE_ERR_SUCCESS = 0x00, /* success, no error detected */ FW_RI_CQE_ERR_STAG = 0x01, /* STAG invalid */ FW_RI_CQE_ERR_PDID = 0x02, /* PDID mismatch */ FW_RI_CQE_ERR_QPID = 0x03, /* QPID mismatch */ FW_RI_CQE_ERR_ACCESS = 0x04, /* Invalid access right */ FW_RI_CQE_ERR_WRAP = 0x05, /* Wrap error */ FW_RI_CQE_ERR_BOUND = 0x06, /* base and bounds violation */ FW_RI_CQE_ERR_INVALIDATE_SHARED_MR = 0x07, /* attempt to invalidate a SMR */ FW_RI_CQE_ERR_INVALIDATE_MR_WITH_MW_BOUND = 0x08, /* attempt to invalidate a MR w MW */ FW_RI_CQE_ERR_ECC = 0x09, /* ECC error detected */ FW_RI_CQE_ERR_ECC_PSTAG 
= 0x0A, /* ECC error detected when reading the PSTAG for a MW Invalidate */ FW_RI_CQE_ERR_PBL_ADDR_BOUND = 0x0B, /* pbl address out of bound : software error */ FW_RI_CQE_ERR_CRC = 0x10, /* CRC error */ FW_RI_CQE_ERR_MARKER = 0x11, /* Marker error */ FW_RI_CQE_ERR_PDU_LEN_ERR = 0x12, /* invalid PDU length */ FW_RI_CQE_ERR_OUT_OF_RQE = 0x13, /* out of RQE */ FW_RI_CQE_ERR_DDP_VERSION = 0x14, /* wrong DDP version */ FW_RI_CQE_ERR_RDMA_VERSION = 0x15, /* wrong RDMA version */ FW_RI_CQE_ERR_OPCODE = 0x16, /* invalid rdma opcode */ FW_RI_CQE_ERR_DDP_QUEUE_NUM = 0x17, /* invalid ddp queue number */ FW_RI_CQE_ERR_MSN = 0x18, /* MSN error */ FW_RI_CQE_ERR_TBIT = 0x19, /* tag bit not set correctly */ FW_RI_CQE_ERR_MO = 0x1A, /* MO not zero for TERMINATE or READ_REQ */ FW_RI_CQE_ERR_MSN_GAP = 0x1B, /* */ FW_RI_CQE_ERR_MSN_RANGE = 0x1C, /* */ FW_RI_CQE_ERR_IRD_OVERFLOW = 0x1D, /* */ FW_RI_CQE_ERR_RQE_ADDR_BOUND = 0x1E, /* RQE address out of bound : software error */ FW_RI_CQE_ERR_INTERNAL_ERR = 0x1F /* internel error (opcode mismatch) */ }; struct fw_ri_dsge_pair { __be32 len[2]; __be64 addr[2]; }; struct fw_ri_dsgl { __u8 op; __u8 r1; __be16 nsge; __be32 len0; __be64 addr0; #ifndef C99_NOT_SUPPORTED struct fw_ri_dsge_pair sge[0]; #endif }; struct fw_ri_sge { __be32 stag; __be32 len; __be64 to; }; struct fw_ri_isgl { __u8 op; __u8 r1; __be16 nsge; __be32 r2; #ifndef C99_NOT_SUPPORTED struct fw_ri_sge sge[0]; #endif }; struct fw_ri_immd { __u8 op; __u8 r1; __be16 r2; __be32 immdlen; #ifndef C99_NOT_SUPPORTED __u8 data[0]; #endif }; struct fw_ri_tpte { __be32 valid_to_pdid; __be32 locread_to_qpid; __be32 nosnoop_pbladdr; __be32 len_lo; __be32 va_hi; __be32 va_lo_fbo; __be32 dca_mwbcnt_pstag; __be32 len_hi; }; #define S_FW_RI_TPTE_VALID 31 #define M_FW_RI_TPTE_VALID 0x1 #define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID) #define G_FW_RI_TPTE_VALID(x) \ (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID) #define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U) #define S_FW_RI_TPTE_STAGKEY 23 #define M_FW_RI_TPTE_STAGKEY 0xff #define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY) #define G_FW_RI_TPTE_STAGKEY(x) \ (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY) #define S_FW_RI_TPTE_STAGSTATE 22 #define M_FW_RI_TPTE_STAGSTATE 0x1 #define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE) #define G_FW_RI_TPTE_STAGSTATE(x) \ (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE) #define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U) #define S_FW_RI_TPTE_STAGTYPE 20 #define M_FW_RI_TPTE_STAGTYPE 0x3 #define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE) #define G_FW_RI_TPTE_STAGTYPE(x) \ (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE) #define S_FW_RI_TPTE_PDID 0 #define M_FW_RI_TPTE_PDID 0xfffff #define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID) #define G_FW_RI_TPTE_PDID(x) \ (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID) #define S_FW_RI_TPTE_PERM 28 #define M_FW_RI_TPTE_PERM 0xf #define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM) #define G_FW_RI_TPTE_PERM(x) \ (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM) #define S_FW_RI_TPTE_REMINVDIS 27 #define M_FW_RI_TPTE_REMINVDIS 0x1 #define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS) #define G_FW_RI_TPTE_REMINVDIS(x) \ (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS) #define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U) #define S_FW_RI_TPTE_ADDRTYPE 26 #define M_FW_RI_TPTE_ADDRTYPE 1 #define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE) #define G_FW_RI_TPTE_ADDRTYPE(x) \ 
(((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE) #define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U) #define S_FW_RI_TPTE_MWBINDEN 25 #define M_FW_RI_TPTE_MWBINDEN 0x1 #define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN) #define G_FW_RI_TPTE_MWBINDEN(x) \ (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN) #define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U) #define S_FW_RI_TPTE_PS 20 #define M_FW_RI_TPTE_PS 0x1f #define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS) #define G_FW_RI_TPTE_PS(x) \ (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS) #define S_FW_RI_TPTE_QPID 0 #define M_FW_RI_TPTE_QPID 0xfffff #define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID) #define G_FW_RI_TPTE_QPID(x) \ (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID) #define S_FW_RI_TPTE_NOSNOOP 31 #define M_FW_RI_TPTE_NOSNOOP 0x1 #define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP) #define G_FW_RI_TPTE_NOSNOOP(x) \ (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP) #define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U) #define S_FW_RI_TPTE_PBLADDR 0 #define M_FW_RI_TPTE_PBLADDR 0x1fffffff #define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR) #define G_FW_RI_TPTE_PBLADDR(x) \ (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR) #define S_FW_RI_TPTE_DCA 24 #define M_FW_RI_TPTE_DCA 0x1f #define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA) #define G_FW_RI_TPTE_DCA(x) \ (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA) #define S_FW_RI_TPTE_MWBCNT_PSTAG 0 #define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff #define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \ ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG) #define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \ (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG) enum fw_ri_cqe_rxtx { FW_RI_CQE_RXTX_RX = 0x0, FW_RI_CQE_RXTX_TX = 0x1, }; struct fw_ri_cqe { union fw_ri_rxtx { struct fw_ri_scqe { __be32 qpid_n_stat_rxtx_type; __be32 plen; __be32 stag; __be32 wrid; } scqe; struct fw_ri_rcqe { __be32 qpid_n_stat_rxtx_type; __be32 plen; __be32 stag; __be32 msn; } rcqe; struct fw_ri_rcqe_imm { __be32 qpid_n_stat_rxtx_type; __be32 plen; __be32 mo; __be32 msn; __u64 imm_data; } imm_data_rcqe; } u; }; #define S_FW_RI_CQE_QPID 12 #define M_FW_RI_CQE_QPID 0xfffff #define V_FW_RI_CQE_QPID(x) ((x) << S_FW_RI_CQE_QPID) #define G_FW_RI_CQE_QPID(x) \ (((x) >> S_FW_RI_CQE_QPID) & M_FW_RI_CQE_QPID) #define S_FW_RI_CQE_NOTIFY 10 #define M_FW_RI_CQE_NOTIFY 0x1 #define V_FW_RI_CQE_NOTIFY(x) ((x) << S_FW_RI_CQE_NOTIFY) #define G_FW_RI_CQE_NOTIFY(x) \ (((x) >> S_FW_RI_CQE_NOTIFY) & M_FW_RI_CQE_NOTIFY) #define S_FW_RI_CQE_STATUS 5 #define M_FW_RI_CQE_STATUS 0x1f #define V_FW_RI_CQE_STATUS(x) ((x) << S_FW_RI_CQE_STATUS) #define G_FW_RI_CQE_STATUS(x) \ (((x) >> S_FW_RI_CQE_STATUS) & M_FW_RI_CQE_STATUS) #define S_FW_RI_CQE_RXTX 4 #define M_FW_RI_CQE_RXTX 0x1 #define V_FW_RI_CQE_RXTX(x) ((x) << S_FW_RI_CQE_RXTX) #define G_FW_RI_CQE_RXTX(x) \ (((x) >> S_FW_RI_CQE_RXTX) & M_FW_RI_CQE_RXTX) #define S_FW_RI_CQE_TYPE 0 #define M_FW_RI_CQE_TYPE 0xf #define V_FW_RI_CQE_TYPE(x) ((x) << S_FW_RI_CQE_TYPE) #define G_FW_RI_CQE_TYPE(x) \ (((x) >> S_FW_RI_CQE_TYPE) & M_FW_RI_CQE_TYPE) enum fw_ri_res_type { FW_RI_RES_TYPE_SQ, FW_RI_RES_TYPE_RQ, FW_RI_RES_TYPE_CQ, FW_RI_RES_TYPE_SRQ, }; enum fw_ri_res_op { FW_RI_RES_OP_WRITE, FW_RI_RES_OP_RESET, }; struct fw_ri_res { union fw_ri_restype { struct fw_ri_res_sqrq { __u8 restype; __u8 op; __be16 r3; __be32 eqid; __be32 r4[2]; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; } sqrq; struct fw_ri_res_cq { __u8 restype; __u8 op; __be16 r3; __be32 iqid; 
__be32 r4[2]; __be32 iqandst_to_iqandstindex; __be16 iqdroprss_to_iqesize; __be16 iqsize; __be64 iqaddr; __be32 iqns_iqro; __be32 r6_lo; __be64 r7; } cq; struct fw_ri_res_srq { __u8 restype; __u8 op; __be16 r3; __be32 eqid; __be32 r4[2]; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; __be32 srqid; __be32 pdid; __be32 hwsrqsize; __be32 hwsrqaddr; } srq; } u; }; struct fw_ri_res_wr { __be32 op_nres; __be32 len16_pkd; __u64 cookie; #ifndef C99_NOT_SUPPORTED struct fw_ri_res res[0]; #endif }; #define S_FW_RI_RES_WR_VFN 8 #define M_FW_RI_RES_WR_VFN 0xff #define V_FW_RI_RES_WR_VFN(x) ((x) << S_FW_RI_RES_WR_VFN) #define G_FW_RI_RES_WR_VFN(x) \ (((x) >> S_FW_RI_RES_WR_VFN) & M_FW_RI_RES_WR_VFN) #define S_FW_RI_RES_WR_NRES 0 #define M_FW_RI_RES_WR_NRES 0xff #define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES) #define G_FW_RI_RES_WR_NRES(x) \ (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES) #define S_FW_RI_RES_WR_FETCHSZM 26 #define M_FW_RI_RES_WR_FETCHSZM 0x1 #define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM) #define G_FW_RI_RES_WR_FETCHSZM(x) \ (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM) #define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U) #define S_FW_RI_RES_WR_STATUSPGNS 25 #define M_FW_RI_RES_WR_STATUSPGNS 0x1 #define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS) #define G_FW_RI_RES_WR_STATUSPGNS(x) \ (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS) #define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U) #define S_FW_RI_RES_WR_STATUSPGRO 24 #define M_FW_RI_RES_WR_STATUSPGRO 0x1 #define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO) #define G_FW_RI_RES_WR_STATUSPGRO(x) \ (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO) #define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U) #define S_FW_RI_RES_WR_FETCHNS 23 #define M_FW_RI_RES_WR_FETCHNS 0x1 #define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS) #define G_FW_RI_RES_WR_FETCHNS(x) \ (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS) #define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U) #define S_FW_RI_RES_WR_FETCHRO 22 #define M_FW_RI_RES_WR_FETCHRO 0x1 #define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO) #define G_FW_RI_RES_WR_FETCHRO(x) \ (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO) #define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U) #define S_FW_RI_RES_WR_HOSTFCMODE 20 #define M_FW_RI_RES_WR_HOSTFCMODE 0x3 #define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE) #define G_FW_RI_RES_WR_HOSTFCMODE(x) \ (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE) #define S_FW_RI_RES_WR_CPRIO 19 #define M_FW_RI_RES_WR_CPRIO 0x1 #define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO) #define G_FW_RI_RES_WR_CPRIO(x) \ (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO) #define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U) #define S_FW_RI_RES_WR_ONCHIP 18 #define M_FW_RI_RES_WR_ONCHIP 0x1 #define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP) #define G_FW_RI_RES_WR_ONCHIP(x) \ (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP) #define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U) #define S_FW_RI_RES_WR_PCIECHN 16 #define M_FW_RI_RES_WR_PCIECHN 0x3 #define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN) #define G_FW_RI_RES_WR_PCIECHN(x) \ (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN) #define S_FW_RI_RES_WR_IQID 0 #define M_FW_RI_RES_WR_IQID 0xffff #define V_FW_RI_RES_WR_IQID(x) 

#define S_FW_RI_RES_WR_DCAEN	31
#define M_FW_RI_RES_WR_DCAEN	0x1
#define V_FW_RI_RES_WR_DCAEN(x)	((x) << S_FW_RI_RES_WR_DCAEN)
#define G_FW_RI_RES_WR_DCAEN(x)	\
    (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
#define F_FW_RI_RES_WR_DCAEN	V_FW_RI_RES_WR_DCAEN(1U)

#define S_FW_RI_RES_WR_DCACPU	26
#define M_FW_RI_RES_WR_DCACPU	0x1f
#define V_FW_RI_RES_WR_DCACPU(x)	((x) << S_FW_RI_RES_WR_DCACPU)
#define G_FW_RI_RES_WR_DCACPU(x)	\
    (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)

#define S_FW_RI_RES_WR_FBMIN	23
#define M_FW_RI_RES_WR_FBMIN	0x7
#define V_FW_RI_RES_WR_FBMIN(x)	((x) << S_FW_RI_RES_WR_FBMIN)
#define G_FW_RI_RES_WR_FBMIN(x)	\
    (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)

#define S_FW_RI_RES_WR_FBMAX	20
#define M_FW_RI_RES_WR_FBMAX	0x7
#define V_FW_RI_RES_WR_FBMAX(x)	((x) << S_FW_RI_RES_WR_FBMAX)
#define G_FW_RI_RES_WR_FBMAX(x)	\
    (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)

#define S_FW_RI_RES_WR_CIDXFTHRESHO	19
#define M_FW_RI_RES_WR_CIDXFTHRESHO	0x1
#define V_FW_RI_RES_WR_CIDXFTHRESHO(x)	((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
#define G_FW_RI_RES_WR_CIDXFTHRESHO(x)	\
    (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
#define F_FW_RI_RES_WR_CIDXFTHRESHO	V_FW_RI_RES_WR_CIDXFTHRESHO(1U)

#define S_FW_RI_RES_WR_CIDXFTHRESH	16
#define M_FW_RI_RES_WR_CIDXFTHRESH	0x7
#define V_FW_RI_RES_WR_CIDXFTHRESH(x)	((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
#define G_FW_RI_RES_WR_CIDXFTHRESH(x)	\
    (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)

#define S_FW_RI_RES_WR_EQSIZE	0
#define M_FW_RI_RES_WR_EQSIZE	0xffff
#define V_FW_RI_RES_WR_EQSIZE(x)	((x) << S_FW_RI_RES_WR_EQSIZE)
#define G_FW_RI_RES_WR_EQSIZE(x)	\
    (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
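
/*
 * Illustrative sketch only: a driver creating an SQ/RQ would typically fill
 * fw_ri_res_sqrq.fetchszm_to_iqid and dcaen_to_eqsize by OR-ing the V_/F_
 * forms above.  The helper name and the field values here are made up for
 * the example.
 */
#if 0	/* example only */
static inline void
example_fill_eq(struct fw_ri_res *r, u_int iqid, u_int eqsize)
{
	r->u.sqrq.fetchszm_to_iqid = htobe32(V_FW_RI_RES_WR_HOSTFCMODE(0) |
	    V_FW_RI_RES_WR_CPRIO(0) | F_FW_RI_RES_WR_FETCHRO |
	    V_FW_RI_RES_WR_IQID(iqid));
	r->u.sqrq.dcaen_to_eqsize = htobe32(V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(3) | V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
}
#endif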

#define S_FW_RI_RES_WR_IQANDST	15
#define M_FW_RI_RES_WR_IQANDST	0x1
#define V_FW_RI_RES_WR_IQANDST(x)	((x) << S_FW_RI_RES_WR_IQANDST)
#define G_FW_RI_RES_WR_IQANDST(x)	\
    (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
#define F_FW_RI_RES_WR_IQANDST	V_FW_RI_RES_WR_IQANDST(1U)

#define S_FW_RI_RES_WR_IQANUS	14
#define M_FW_RI_RES_WR_IQANUS	0x1
#define V_FW_RI_RES_WR_IQANUS(x)	((x) << S_FW_RI_RES_WR_IQANUS)
#define G_FW_RI_RES_WR_IQANUS(x)	\
    (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
#define F_FW_RI_RES_WR_IQANUS	V_FW_RI_RES_WR_IQANUS(1U)

#define S_FW_RI_RES_WR_IQANUD	12
#define M_FW_RI_RES_WR_IQANUD	0x3
#define V_FW_RI_RES_WR_IQANUD(x)	((x) << S_FW_RI_RES_WR_IQANUD)
#define G_FW_RI_RES_WR_IQANUD(x)	\
    (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)

#define S_FW_RI_RES_WR_IQANDSTINDEX	0
#define M_FW_RI_RES_WR_IQANDSTINDEX	0xfff
#define V_FW_RI_RES_WR_IQANDSTINDEX(x)	((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
#define G_FW_RI_RES_WR_IQANDSTINDEX(x)	\
    (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)

#define S_FW_RI_RES_WR_IQDROPRSS	15
#define M_FW_RI_RES_WR_IQDROPRSS	0x1
#define V_FW_RI_RES_WR_IQDROPRSS(x)	((x) << S_FW_RI_RES_WR_IQDROPRSS)
#define G_FW_RI_RES_WR_IQDROPRSS(x)	\
    (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
#define F_FW_RI_RES_WR_IQDROPRSS	V_FW_RI_RES_WR_IQDROPRSS(1U)

#define S_FW_RI_RES_WR_IQGTSMODE	14
#define M_FW_RI_RES_WR_IQGTSMODE	0x1
#define V_FW_RI_RES_WR_IQGTSMODE(x)	((x) << S_FW_RI_RES_WR_IQGTSMODE)
#define G_FW_RI_RES_WR_IQGTSMODE(x)	\
    (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
#define F_FW_RI_RES_WR_IQGTSMODE	V_FW_RI_RES_WR_IQGTSMODE(1U)

#define S_FW_RI_RES_WR_IQPCIECH	12
#define M_FW_RI_RES_WR_IQPCIECH	0x3
#define V_FW_RI_RES_WR_IQPCIECH(x)	((x) << S_FW_RI_RES_WR_IQPCIECH)
#define G_FW_RI_RES_WR_IQPCIECH(x)	\
    (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)

#define S_FW_RI_RES_WR_IQDCAEN	11
#define M_FW_RI_RES_WR_IQDCAEN	0x1
#define V_FW_RI_RES_WR_IQDCAEN(x)	((x) << S_FW_RI_RES_WR_IQDCAEN)
#define G_FW_RI_RES_WR_IQDCAEN(x)	\
    (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
#define F_FW_RI_RES_WR_IQDCAEN	V_FW_RI_RES_WR_IQDCAEN(1U)

#define S_FW_RI_RES_WR_IQDCACPU	6
#define M_FW_RI_RES_WR_IQDCACPU	0x1f
#define V_FW_RI_RES_WR_IQDCACPU(x)	((x) << S_FW_RI_RES_WR_IQDCACPU)
#define G_FW_RI_RES_WR_IQDCACPU(x)	\
    (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)

#define S_FW_RI_RES_WR_IQINTCNTTHRESH	4
#define M_FW_RI_RES_WR_IQINTCNTTHRESH	0x3
#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x)	\
    ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x)	\
    (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)

#define S_FW_RI_RES_WR_IQO	3
#define M_FW_RI_RES_WR_IQO	0x1
#define V_FW_RI_RES_WR_IQO(x)	((x) << S_FW_RI_RES_WR_IQO)
#define G_FW_RI_RES_WR_IQO(x)	\
    (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
#define F_FW_RI_RES_WR_IQO	V_FW_RI_RES_WR_IQO(1U)

#define S_FW_RI_RES_WR_IQCPRIO	2
#define M_FW_RI_RES_WR_IQCPRIO	0x1
#define V_FW_RI_RES_WR_IQCPRIO(x)	((x) << S_FW_RI_RES_WR_IQCPRIO)
#define G_FW_RI_RES_WR_IQCPRIO(x)	\
    (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
#define F_FW_RI_RES_WR_IQCPRIO	V_FW_RI_RES_WR_IQCPRIO(1U)

#define S_FW_RI_RES_WR_IQESIZE	0
#define M_FW_RI_RES_WR_IQESIZE	0x3
#define V_FW_RI_RES_WR_IQESIZE(x)	((x) << S_FW_RI_RES_WR_IQESIZE)
#define G_FW_RI_RES_WR_IQESIZE(x)	\
    (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)

#define S_FW_RI_RES_WR_IQNS	31
#define M_FW_RI_RES_WR_IQNS	0x1
#define V_FW_RI_RES_WR_IQNS(x)	((x) << S_FW_RI_RES_WR_IQNS)
#define G_FW_RI_RES_WR_IQNS(x)	\
    (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
#define F_FW_RI_RES_WR_IQNS	V_FW_RI_RES_WR_IQNS(1U)

#define S_FW_RI_RES_WR_IQRO	30
#define M_FW_RI_RES_WR_IQRO	0x1
#define V_FW_RI_RES_WR_IQRO(x)	((x) << S_FW_RI_RES_WR_IQRO)
#define G_FW_RI_RES_WR_IQRO(x)	\
    (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
#define F_FW_RI_RES_WR_IQRO	V_FW_RI_RES_WR_IQRO(1U)

struct fw_ri_rdma_write_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__u64	immd_data;
	__be32	plen;
	__be32	stag_sink;
	__be64	to_sink;
#ifndef C99_NOT_SUPPORTED
	union {
		struct fw_ri_immd immd_src[0];
		struct fw_ri_isgl isgl_src[0];
	} u;
#endif
};

struct fw_ri_send_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__be32	sendop_pkd;
	__be32	stag_inv;
	__be32	plen;
	__be32	r3;
	__be64	r4;
#ifndef C99_NOT_SUPPORTED
	union {
		struct fw_ri_immd immd_src[0];
		struct fw_ri_isgl isgl_src[0];
	} u;
#endif
};

#define S_FW_RI_SEND_WR_SENDOP	0
#define M_FW_RI_SEND_WR_SENDOP	0xf
#define V_FW_RI_SEND_WR_SENDOP(x)	((x) << S_FW_RI_SEND_WR_SENDOP)
#define G_FW_RI_SEND_WR_SENDOP(x)	\
    (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)

struct fw_ri_rdma_write_cmpl_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__u8	r2;
	__u8	flags_send;
	__u16	wrid_send;
	__be32	stag_inv;
	__be32	plen;
	__be32	stag_sink;
	__be64	to_sink;
	union fw_ri_cmpl {
		struct fw_ri_immd_cmpl {
			__u8	op;
			__u8	r1[6];
			__u8	immdlen;
			__u8	data[16];
		} immd_src;
		struct fw_ri_isgl isgl_src;
	} u_cmpl;
	__be64	r3;
#ifndef C99_NOT_SUPPORTED
	union fw_ri_write {
		struct fw_ri_immd immd_src[0];
		struct fw_ri_isgl isgl_src[0];
	} u;
#endif
};
struct fw_ri_rdma_read_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__be64	r2;
	__be32	stag_sink;
	__be32	to_sink_hi;
	__be32	to_sink_lo;
	__be32	plen;
	__be32	stag_src;
	__be32	to_src_hi;
	__be32	to_src_lo;
	__be32	r5;
};

struct fw_ri_recv_wr {
	__u8	opcode;
	__u8	r1;
	__u16	wrid;
	__u8	r2[3];
	__u8	len16;
	struct fw_ri_isgl isgl;
};

struct fw_ri_bind_mw_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__u8	qpbinde_to_dcacpu;
	__u8	pgsz_shift;
	__u8	addr_type;
	__u8	mem_perms;
	__be32	stag_mr;
	__be32	stag_mw;
	__be32	r3;
	__be64	len_mw;
	__be64	va_fbo;
	__be64	r4;
};

#define S_FW_RI_BIND_MW_WR_QPBINDE	6
#define M_FW_RI_BIND_MW_WR_QPBINDE	0x1
#define V_FW_RI_BIND_MW_WR_QPBINDE(x)	((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
#define G_FW_RI_BIND_MW_WR_QPBINDE(x)	\
    (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
#define F_FW_RI_BIND_MW_WR_QPBINDE	V_FW_RI_BIND_MW_WR_QPBINDE(1U)

#define S_FW_RI_BIND_MW_WR_NS	5
#define M_FW_RI_BIND_MW_WR_NS	0x1
#define V_FW_RI_BIND_MW_WR_NS(x)	((x) << S_FW_RI_BIND_MW_WR_NS)
#define G_FW_RI_BIND_MW_WR_NS(x)	\
    (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
#define F_FW_RI_BIND_MW_WR_NS	V_FW_RI_BIND_MW_WR_NS(1U)

#define S_FW_RI_BIND_MW_WR_DCACPU	0
#define M_FW_RI_BIND_MW_WR_DCACPU	0x1f
#define V_FW_RI_BIND_MW_WR_DCACPU(x)	((x) << S_FW_RI_BIND_MW_WR_DCACPU)
#define G_FW_RI_BIND_MW_WR_DCACPU(x)	\
    (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)

struct fw_ri_fr_nsmr_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__u8	qpbinde_to_dcacpu;
	__u8	pgsz_shift;
	__u8	addr_type;
	__u8	mem_perms;
	__be32	stag;
	__be32	len_hi;
	__be32	len_lo;
	__be32	va_hi;
	__be32	va_lo_fbo;
};

#define S_FW_RI_FR_NSMR_WR_QPBINDE	6
#define M_FW_RI_FR_NSMR_WR_QPBINDE	0x1
#define V_FW_RI_FR_NSMR_WR_QPBINDE(x)	((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
#define G_FW_RI_FR_NSMR_WR_QPBINDE(x)	\
    (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
#define F_FW_RI_FR_NSMR_WR_QPBINDE	V_FW_RI_FR_NSMR_WR_QPBINDE(1U)

#define S_FW_RI_FR_NSMR_WR_NS	5
#define M_FW_RI_FR_NSMR_WR_NS	0x1
#define V_FW_RI_FR_NSMR_WR_NS(x)	((x) << S_FW_RI_FR_NSMR_WR_NS)
#define G_FW_RI_FR_NSMR_WR_NS(x)	\
    (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
#define F_FW_RI_FR_NSMR_WR_NS	V_FW_RI_FR_NSMR_WR_NS(1U)

#define S_FW_RI_FR_NSMR_WR_DCACPU	0
#define M_FW_RI_FR_NSMR_WR_DCACPU	0x1f
#define V_FW_RI_FR_NSMR_WR_DCACPU(x)	((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
#define G_FW_RI_FR_NSMR_WR_DCACPU(x)	\
    (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)

struct fw_ri_fr_nsmr_tpte_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__be32	r2;
	__be32	stag;
	struct fw_ri_tpte tpte;
	__be64	pbl[2];
};

struct fw_ri_inv_lstag_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__be32	r2;
	__be32	stag_inv;
};

struct fw_ri_send_immediate_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__be32	sendimmop_pkd;
	__be32	r3;
	__be32	plen;
	__be32	r4;
	__be64	r5;
#ifndef C99_NOT_SUPPORTED
	struct fw_ri_immd immd_src[0];
#endif
};

#define S_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP	0
#define M_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP	0xf
#define V_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP(x)	\
    ((x) << S_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP)
#define G_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP(x)	\
    (((x) >> S_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP) & \
     M_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP)

enum fw_ri_atomic_op {
	FW_RI_ATOMIC_OP_FETCHADD,
	FW_RI_ATOMIC_OP_SWAP,
	FW_RI_ATOMIC_OP_CMDSWAP,
};

struct fw_ri_atomic_wr {
	__u8	opcode;
	__u8	flags;
	__u16	wrid;
	__u8	r1[3];
	__u8	len16;
	__be32	atomicop_pkd;
	__be64	r3;
	__be32	aopcode_pkd;
	__be32	reqid;
	__be32	stag;
	__be32	to_hi;
	__be32	to_lo;
	__be32	addswap_data_hi;
	__be32	addswap_data_lo;
	__be32	addswap_mask_hi;
	__be32	addswap_mask_lo;
	__be32	compare_data_hi;
	__be32	compare_data_lo;
	__be32	compare_mask_hi;
	__be32	compare_mask_lo;
	__be32	r5;
};

#define S_FW_RI_ATOMIC_WR_ATOMICOP	0
#define M_FW_RI_ATOMIC_WR_ATOMICOP	0xf
#define V_FW_RI_ATOMIC_WR_ATOMICOP(x)	((x) << S_FW_RI_ATOMIC_WR_ATOMICOP)
#define G_FW_RI_ATOMIC_WR_ATOMICOP(x)	\
    (((x) >> S_FW_RI_ATOMIC_WR_ATOMICOP) & M_FW_RI_ATOMIC_WR_ATOMICOP)

#define S_FW_RI_ATOMIC_WR_AOPCODE	0
#define M_FW_RI_ATOMIC_WR_AOPCODE	0xf
#define V_FW_RI_ATOMIC_WR_AOPCODE(x)	((x) << S_FW_RI_ATOMIC_WR_AOPCODE)
#define G_FW_RI_ATOMIC_WR_AOPCODE(x)	\
    (((x) >> S_FW_RI_ATOMIC_WR_AOPCODE) & M_FW_RI_ATOMIC_WR_AOPCODE)

enum fw_ri_type {
	FW_RI_TYPE_INIT,
	FW_RI_TYPE_FINI,
	FW_RI_TYPE_TERMINATE
};

enum fw_ri_init_p2ptype {
	FW_RI_INIT_P2PTYPE_RDMA_WRITE		= FW_RI_RDMA_WRITE,
	FW_RI_INIT_P2PTYPE_READ_REQ		= FW_RI_READ_REQ,
	FW_RI_INIT_P2PTYPE_SEND			= FW_RI_SEND,
	FW_RI_INIT_P2PTYPE_SEND_WITH_INV	= FW_RI_SEND_WITH_INV,
	FW_RI_INIT_P2PTYPE_SEND_WITH_SE		= FW_RI_SEND_WITH_SE,
	FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV	= FW_RI_SEND_WITH_SE_INV,
	FW_RI_INIT_P2PTYPE_DISABLED		= 0xf,
};

enum fw_ri_init_rqeqid_srq {
	FW_RI_INIT_RQEQID_SRQ			= 1 << 31,
};

struct fw_ri_wr {
	__be32	op_compl;
	__be32	flowid_len16;
	__u64	cookie;
	union fw_ri {
		struct fw_ri_init {
			__u8	type;
			__u8	mpareqbit_p2ptype;
			__u8	r4[2];
			__u8	mpa_attrs;
			__u8	qp_caps;
			__be16	nrqe;
			__be32	pdid;
			__be32	qpid;
			__be32	sq_eqid;
			__be32	rq_eqid;
			__be32	scqid;
			__be32	rcqid;
			__be32	ord_max;
			__be32	ird_max;
			__be32	iss;
			__be32	irs;
			__be32	hwrqsize;
			__be32	hwrqaddr;
			__be64	r5;
			union fw_ri_init_p2p {
				struct fw_ri_rdma_write_wr write;
				struct fw_ri_rdma_read_wr read;
				struct fw_ri_send_wr send;
			} u;
		} init;
		struct fw_ri_fini {
			__u8	type;
			__u8	r3[7];
			__be64	r4;
		} fini;
		struct fw_ri_terminate {
			__u8	type;
			__u8	r3[3];
			__be32	immdlen;
			__u8	termmsg[40];
		} terminate;
	} u;
};

#define S_FW_RI_WR_MPAREQBIT	7
#define M_FW_RI_WR_MPAREQBIT	0x1
#define V_FW_RI_WR_MPAREQBIT(x)	((x) << S_FW_RI_WR_MPAREQBIT)
#define G_FW_RI_WR_MPAREQBIT(x)	\
    (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
#define F_FW_RI_WR_MPAREQBIT	V_FW_RI_WR_MPAREQBIT(1U)

#define S_FW_RI_WR_0BRRBIT	6
#define M_FW_RI_WR_0BRRBIT	0x1
#define V_FW_RI_WR_0BRRBIT(x)	((x) << S_FW_RI_WR_0BRRBIT)
#define G_FW_RI_WR_0BRRBIT(x)	\
    (((x) >> S_FW_RI_WR_0BRRBIT) & M_FW_RI_WR_0BRRBIT)
#define F_FW_RI_WR_0BRRBIT	V_FW_RI_WR_0BRRBIT(1U)

#define S_FW_RI_WR_P2PTYPE	0
#define M_FW_RI_WR_P2PTYPE	0xf
#define V_FW_RI_WR_P2PTYPE(x)	((x) << S_FW_RI_WR_P2PTYPE)
#define G_FW_RI_WR_P2PTYPE(x)	\
    (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
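
/*
 * Illustrative sketch only: mpareqbit_p2ptype in fw_ri_init packs the MPA
 * request bit and the peer-to-peer type into one byte via the macros above.
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static inline uint8_t
example_mpa_p2p(int mpa_req, enum fw_ri_init_p2ptype p2p)
{
	return (V_FW_RI_WR_MPAREQBIT(mpa_req ? 1 : 0) |
	    V_FW_RI_WR_P2PTYPE(p2p));
}
#endif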

/******************************************************************************
 *  F O i S C S I   W O R K   R E Q U E S T s
 *********************************************/

#define	FW_FOISCSI_NAME_MAX_LEN		224
#define	FW_FOISCSI_ALIAS_MAX_LEN	224
#define	FW_FOISCSI_KEY_MAX_LEN		64
#define	FW_FOISCSI_VAL_MAX_LEN		256
#define	FW_FOISCSI_CHAP_SEC_MAX_LEN	128
#define	FW_FOISCSI_INIT_NODE_MAX	8

enum fw_chnet_ifconf_wr_subop {
	FW_CHNET_IFCONF_WR_SUBOP_NONE = 0,
	FW_CHNET_IFCONF_WR_SUBOP_IPV4_SET,
	FW_CHNET_IFCONF_WR_SUBOP_IPV4_GET,
	FW_CHNET_IFCONF_WR_SUBOP_VLAN_IPV4_SET,
	FW_CHNET_IFCONF_WR_SUBOP_VLAN_IPV4_GET,
	FW_CHNET_IFCONF_WR_SUBOP_IPV6_SET,
	FW_CHNET_IFCONF_WR_SUBOP_IPV6_GET,
	FW_CHNET_IFCONF_WR_SUBOP_VLAN_SET,
	FW_CHNET_IFCONF_WR_SUBOP_VLAN_GET,
	FW_CHNET_IFCONF_WR_SUBOP_MTU_SET,
	FW_CHNET_IFCONF_WR_SUBOP_MTU_GET,
	FW_CHNET_IFCONF_WR_SUBOP_DHCP_SET,
	FW_CHNET_IFCONF_WR_SUBOP_DHCP_GET,
	FW_CHNET_IFCONF_WR_SUBOP_DHCPV6_SET,
	FW_CHNET_IFCONF_WR_SUBOP_DHCPV6_GET,
	FW_CHNET_IFCONF_WR_SUBOP_LINKLOCAL_ADDR_SET,
	FW_CHNET_IFCONF_WR_SUBOP_RA_BASED_ADDR_SET,
	FW_CHNET_IFCONF_WR_SUBOP_ADDR_EXPIRED,
	FW_CHNET_IFCONF_WR_SUBOP_ICMP_PING4,
	FW_CHNET_IFCONF_WR_SUBOP_ICMP_PING6,
	FW_CHNET_IFCONF_WR_SUBOP_MAX,
};

struct fw_chnet_ifconf_wr {
	__be32	op_compl;
	__be32	flowid_len16;
	__u64	cookie;
	__be32	if_flowid;
	__u8	idx;
	__u8	subop;
	__u8	retval;
	__u8	r2;
	union {
		__be64	r3;
		struct fw_chnet_ifconf_ping {
			__be16	ping_time;
			__u8	ping_rsptype;
			__u8	ping_param_rspcode_to_fin_bit;
			__u8	ping_pktsize;
			__u8	ping_ttl;
			__be16	ping_seq;
		} ping;
		struct fw_chnet_ifconf_mac {
			__u8	peer_mac[6];
			__u8	smac_idx;
		} mac;
	} u;
	struct fw_chnet_ifconf_params {
		__be32	r0;
		__be16	vlanid;
		__be16	mtu;
		union fw_chnet_ifconf_addr_type {
			struct fw_chnet_ifconf_ipv4 {
				__be32	addr;
				__be32	mask;
				__be32	router;
				__be32	r0;
				__be64	r1;
			} ipv4;
			struct fw_chnet_ifconf_ipv6 {
				__u8	prefix_len;
				__u8	r0;
				__be16	r1;
				__be32	r2;
				__be64	addr_hi;
				__be64	addr_lo;
				__be64	router_hi;
				__be64	router_lo;
			} ipv6;
		} in_attr;
	} param;
};

#define S_FW_CHNET_IFCONF_WR_PING_MACBIT	1
#define M_FW_CHNET_IFCONF_WR_PING_MACBIT	0x1
#define V_FW_CHNET_IFCONF_WR_PING_MACBIT(x)	\
    ((x) << S_FW_CHNET_IFCONF_WR_PING_MACBIT)
#define G_FW_CHNET_IFCONF_WR_PING_MACBIT(x)	\
    (((x) >> S_FW_CHNET_IFCONF_WR_PING_MACBIT) & \
     M_FW_CHNET_IFCONF_WR_PING_MACBIT)
#define F_FW_CHNET_IFCONF_WR_PING_MACBIT	\
    V_FW_CHNET_IFCONF_WR_PING_MACBIT(1U)

#define S_FW_CHNET_IFCONF_WR_FIN_BIT	0
#define M_FW_CHNET_IFCONF_WR_FIN_BIT	0x1
#define V_FW_CHNET_IFCONF_WR_FIN_BIT(x)	((x) << S_FW_CHNET_IFCONF_WR_FIN_BIT)
#define G_FW_CHNET_IFCONF_WR_FIN_BIT(x)	\
    (((x) >> S_FW_CHNET_IFCONF_WR_FIN_BIT) & M_FW_CHNET_IFCONF_WR_FIN_BIT)
#define F_FW_CHNET_IFCONF_WR_FIN_BIT	V_FW_CHNET_IFCONF_WR_FIN_BIT(1U)

enum fw_foiscsi_node_type {
	FW_FOISCSI_NODE_TYPE_INITIATOR = 0,
	FW_FOISCSI_NODE_TYPE_TARGET,
};

enum fw_foiscsi_session_type {
	FW_FOISCSI_SESSION_TYPE_DISCOVERY = 0,
	FW_FOISCSI_SESSION_TYPE_NORMAL,
};

enum fw_foiscsi_auth_policy {
	FW_FOISCSI_AUTH_POLICY_ONEWAY = 0,
	FW_FOISCSI_AUTH_POLICY_MUTUAL,
};

enum fw_foiscsi_auth_method {
	FW_FOISCSI_AUTH_METHOD_NONE = 0,
	FW_FOISCSI_AUTH_METHOD_CHAP,
	FW_FOISCSI_AUTH_METHOD_CHAP_FST,
	FW_FOISCSI_AUTH_METHOD_CHAP_SEC,
};

enum fw_foiscsi_digest_type {
	FW_FOISCSI_DIGEST_TYPE_NONE = 0,
	FW_FOISCSI_DIGEST_TYPE_CRC32,
	FW_FOISCSI_DIGEST_TYPE_CRC32_FST,
	FW_FOISCSI_DIGEST_TYPE_CRC32_SEC,
};

enum fw_foiscsi_wr_subop {
	FW_FOISCSI_WR_SUBOP_ADD = 1,
	FW_FOISCSI_WR_SUBOP_DEL = 2,
	FW_FOISCSI_WR_SUBOP_MOD = 4,
};

enum fw_coiscsi_stats_wr_subop {
	FW_COISCSI_WR_SUBOP_TOT = 1,
	FW_COISCSI_WR_SUBOP_MAX = 2,
	FW_COISCSI_WR_SUBOP_CUR = 3,
	FW_COISCSI_WR_SUBOP_CLR = 4,
};

enum fw_foiscsi_ctrl_state {
	FW_FOISCSI_CTRL_STATE_FREE = 0,
	FW_FOISCSI_CTRL_STATE_ONLINE = 1,
	FW_FOISCSI_CTRL_STATE_FAILED,
	FW_FOISCSI_CTRL_STATE_IN_RECOVERY,
	FW_FOISCSI_CTRL_STATE_REDIRECT,
};

struct fw_rdev_wr {
	__be32	op_to_immdlen;
	__be32	alloc_to_len16;
	__be64	cookie;
	__u8	protocol;
	__u8	event_cause;
	__u8	cur_state;
	__u8	prev_state;
	__be32	flags_to_assoc_flowid;
	union rdev_entry {
		struct fcoe_rdev_entry {
			__be32	flowid;
			__u8	protocol;
			__u8	event_cause;
			__u8	flags;
			__u8	rjt_reason;
			__u8	cur_login_st;
			__u8	prev_login_st;
			__be16	rcv_fr_sz;
			__u8	rd_xfer_rdy_to_rport_type;
			__u8	vft_to_qos;
			__u8	org_proc_assoc_to_acc_rsp_code;
			__u8	enh_disc_to_tgt;
			__u8	wwnn[8];
			__u8	wwpn[8];
			__be16	iqid;
			__u8	fc_oui[3];
			__u8	r_id[3];
		} fcoe_rdev;
		struct iscsi_rdev_entry {
			__be32	flowid;
			__u8	protocol;
			__u8	event_cause;
			__u8	flags;
			__u8	r3;
			__be16	iscsi_opts;
			__be16	tcp_opts;
			__be16	ip_opts;
			__be16	max_rcv_len;
			__be16	max_snd_len;
			__be16	first_brst_len;
			__be16	max_brst_len;
			__be16	r4;
			__be16	def_time2wait;
			__be16	def_time2ret;
			__be16	nop_out_intrvl;
			__be16	non_scsi_to;
			__be16	isid;
			__be16	tsid;
			__be16	port;
			__be16	tpgt;
			__u8	r5[6];
			__be16	iqid;
		} iscsi_rdev;
	} u;
};

#define S_FW_RDEV_WR_IMMDLEN	0
#define M_FW_RDEV_WR_IMMDLEN	0xff
#define V_FW_RDEV_WR_IMMDLEN(x)	((x) << S_FW_RDEV_WR_IMMDLEN)
#define G_FW_RDEV_WR_IMMDLEN(x)	\
    (((x) >> S_FW_RDEV_WR_IMMDLEN) & M_FW_RDEV_WR_IMMDLEN)

#define S_FW_RDEV_WR_ALLOC	31
#define M_FW_RDEV_WR_ALLOC	0x1
#define V_FW_RDEV_WR_ALLOC(x)	((x) << S_FW_RDEV_WR_ALLOC)
#define G_FW_RDEV_WR_ALLOC(x)	\
    (((x) >> S_FW_RDEV_WR_ALLOC) & M_FW_RDEV_WR_ALLOC)
#define F_FW_RDEV_WR_ALLOC	V_FW_RDEV_WR_ALLOC(1U)

#define S_FW_RDEV_WR_FREE	30
#define M_FW_RDEV_WR_FREE	0x1
#define V_FW_RDEV_WR_FREE(x)	((x) << S_FW_RDEV_WR_FREE)
#define G_FW_RDEV_WR_FREE(x)	\
    (((x) >> S_FW_RDEV_WR_FREE) & M_FW_RDEV_WR_FREE)
#define F_FW_RDEV_WR_FREE	V_FW_RDEV_WR_FREE(1U)

#define S_FW_RDEV_WR_MODIFY	29
#define M_FW_RDEV_WR_MODIFY	0x1
#define V_FW_RDEV_WR_MODIFY(x)	((x) << S_FW_RDEV_WR_MODIFY)
#define G_FW_RDEV_WR_MODIFY(x)	\
    (((x) >> S_FW_RDEV_WR_MODIFY) & M_FW_RDEV_WR_MODIFY)
#define F_FW_RDEV_WR_MODIFY	V_FW_RDEV_WR_MODIFY(1U)

#define S_FW_RDEV_WR_FLOWID	8
#define M_FW_RDEV_WR_FLOWID	0xfffff
#define V_FW_RDEV_WR_FLOWID(x)	((x) << S_FW_RDEV_WR_FLOWID)
#define G_FW_RDEV_WR_FLOWID(x)	\
    (((x) >> S_FW_RDEV_WR_FLOWID) & M_FW_RDEV_WR_FLOWID)

#define S_FW_RDEV_WR_LEN16	0
#define M_FW_RDEV_WR_LEN16	0xff
#define V_FW_RDEV_WR_LEN16(x)	((x) << S_FW_RDEV_WR_LEN16)
#define G_FW_RDEV_WR_LEN16(x)	\
    (((x) >> S_FW_RDEV_WR_LEN16) & M_FW_RDEV_WR_LEN16)

#define S_FW_RDEV_WR_FLAGS	24
#define M_FW_RDEV_WR_FLAGS	0xff
#define V_FW_RDEV_WR_FLAGS(x)	((x) << S_FW_RDEV_WR_FLAGS)
#define G_FW_RDEV_WR_FLAGS(x)	\
    (((x) >> S_FW_RDEV_WR_FLAGS) & M_FW_RDEV_WR_FLAGS)

#define S_FW_RDEV_WR_GET_NEXT	20
#define M_FW_RDEV_WR_GET_NEXT	0xf
#define V_FW_RDEV_WR_GET_NEXT(x)	((x) << S_FW_RDEV_WR_GET_NEXT)
#define G_FW_RDEV_WR_GET_NEXT(x)	\
    (((x) >> S_FW_RDEV_WR_GET_NEXT) & M_FW_RDEV_WR_GET_NEXT)

#define S_FW_RDEV_WR_ASSOC_FLOWID	0
#define M_FW_RDEV_WR_ASSOC_FLOWID	0xfffff
#define V_FW_RDEV_WR_ASSOC_FLOWID(x)	((x) << S_FW_RDEV_WR_ASSOC_FLOWID)
#define G_FW_RDEV_WR_ASSOC_FLOWID(x)	\
    (((x) >> S_FW_RDEV_WR_ASSOC_FLOWID) & M_FW_RDEV_WR_ASSOC_FLOWID)

#define S_FW_RDEV_WR_RJT	7
#define M_FW_RDEV_WR_RJT	0x1
#define V_FW_RDEV_WR_RJT(x)	((x) << S_FW_RDEV_WR_RJT)
#define G_FW_RDEV_WR_RJT(x)	(((x) >> S_FW_RDEV_WR_RJT) & M_FW_RDEV_WR_RJT)
#define F_FW_RDEV_WR_RJT	V_FW_RDEV_WR_RJT(1U)

#define S_FW_RDEV_WR_REASON	0
#define M_FW_RDEV_WR_REASON	0x7f
#define V_FW_RDEV_WR_REASON(x)	((x) << S_FW_RDEV_WR_REASON)
#define G_FW_RDEV_WR_REASON(x)	\
    (((x) >> S_FW_RDEV_WR_REASON) & M_FW_RDEV_WR_REASON)

#define S_FW_RDEV_WR_RD_XFER_RDY	7
#define M_FW_RDEV_WR_RD_XFER_RDY	0x1
#define V_FW_RDEV_WR_RD_XFER_RDY(x)	((x) << S_FW_RDEV_WR_RD_XFER_RDY)
#define G_FW_RDEV_WR_RD_XFER_RDY(x)	\
    (((x) >> S_FW_RDEV_WR_RD_XFER_RDY) & M_FW_RDEV_WR_RD_XFER_RDY)
#define F_FW_RDEV_WR_RD_XFER_RDY	V_FW_RDEV_WR_RD_XFER_RDY(1U)

#define S_FW_RDEV_WR_WR_XFER_RDY	6
#define M_FW_RDEV_WR_WR_XFER_RDY	0x1
#define V_FW_RDEV_WR_WR_XFER_RDY(x)	((x) << S_FW_RDEV_WR_WR_XFER_RDY)
#define G_FW_RDEV_WR_WR_XFER_RDY(x)	\
    (((x) >> S_FW_RDEV_WR_WR_XFER_RDY) & M_FW_RDEV_WR_WR_XFER_RDY)
#define F_FW_RDEV_WR_WR_XFER_RDY	V_FW_RDEV_WR_WR_XFER_RDY(1U)

#define S_FW_RDEV_WR_FC_SP	5
#define M_FW_RDEV_WR_FC_SP	0x1
#define V_FW_RDEV_WR_FC_SP(x)	((x) << S_FW_RDEV_WR_FC_SP)
#define G_FW_RDEV_WR_FC_SP(x)	\
    (((x) >> S_FW_RDEV_WR_FC_SP) & M_FW_RDEV_WR_FC_SP)
#define F_FW_RDEV_WR_FC_SP	V_FW_RDEV_WR_FC_SP(1U)

#define S_FW_RDEV_WR_RPORT_TYPE	0
#define M_FW_RDEV_WR_RPORT_TYPE	0x1f
#define V_FW_RDEV_WR_RPORT_TYPE(x)	((x) << S_FW_RDEV_WR_RPORT_TYPE)
#define G_FW_RDEV_WR_RPORT_TYPE(x)	\
    (((x) >> S_FW_RDEV_WR_RPORT_TYPE) & M_FW_RDEV_WR_RPORT_TYPE)

#define S_FW_RDEV_WR_VFT	7
#define M_FW_RDEV_WR_VFT	0x1
#define V_FW_RDEV_WR_VFT(x)	((x) << S_FW_RDEV_WR_VFT)
#define G_FW_RDEV_WR_VFT(x)	(((x) >> S_FW_RDEV_WR_VFT) & M_FW_RDEV_WR_VFT)
#define F_FW_RDEV_WR_VFT	V_FW_RDEV_WR_VFT(1U)

#define S_FW_RDEV_WR_NPIV	6
#define M_FW_RDEV_WR_NPIV	0x1
#define V_FW_RDEV_WR_NPIV(x)	((x) << S_FW_RDEV_WR_NPIV)
#define G_FW_RDEV_WR_NPIV(x)	\
    (((x) >> S_FW_RDEV_WR_NPIV) & M_FW_RDEV_WR_NPIV)
#define F_FW_RDEV_WR_NPIV	V_FW_RDEV_WR_NPIV(1U)

#define S_FW_RDEV_WR_CLASS	4
#define M_FW_RDEV_WR_CLASS	0x3
#define V_FW_RDEV_WR_CLASS(x)	((x) << S_FW_RDEV_WR_CLASS)
#define G_FW_RDEV_WR_CLASS(x)	\
    (((x) >> S_FW_RDEV_WR_CLASS) & M_FW_RDEV_WR_CLASS)

#define S_FW_RDEV_WR_SEQ_DEL	3
#define M_FW_RDEV_WR_SEQ_DEL	0x1
#define V_FW_RDEV_WR_SEQ_DEL(x)	((x) << S_FW_RDEV_WR_SEQ_DEL)
#define G_FW_RDEV_WR_SEQ_DEL(x)	\
    (((x) >> S_FW_RDEV_WR_SEQ_DEL) & M_FW_RDEV_WR_SEQ_DEL)
#define F_FW_RDEV_WR_SEQ_DEL	V_FW_RDEV_WR_SEQ_DEL(1U)

#define S_FW_RDEV_WR_PRIO_PREEMP	2
#define M_FW_RDEV_WR_PRIO_PREEMP	0x1
#define V_FW_RDEV_WR_PRIO_PREEMP(x)	((x) << S_FW_RDEV_WR_PRIO_PREEMP)
#define G_FW_RDEV_WR_PRIO_PREEMP(x)	\
    (((x) >> S_FW_RDEV_WR_PRIO_PREEMP) & M_FW_RDEV_WR_PRIO_PREEMP)
#define F_FW_RDEV_WR_PRIO_PREEMP	V_FW_RDEV_WR_PRIO_PREEMP(1U)

#define S_FW_RDEV_WR_PREF	1
#define M_FW_RDEV_WR_PREF	0x1
#define V_FW_RDEV_WR_PREF(x)	((x) << S_FW_RDEV_WR_PREF)
#define G_FW_RDEV_WR_PREF(x)	\
    (((x) >> S_FW_RDEV_WR_PREF) & M_FW_RDEV_WR_PREF)
#define F_FW_RDEV_WR_PREF	V_FW_RDEV_WR_PREF(1U)

#define S_FW_RDEV_WR_QOS	0
#define M_FW_RDEV_WR_QOS	0x1
#define V_FW_RDEV_WR_QOS(x)	((x) << S_FW_RDEV_WR_QOS)
#define G_FW_RDEV_WR_QOS(x)	(((x) >> S_FW_RDEV_WR_QOS) & M_FW_RDEV_WR_QOS)
#define F_FW_RDEV_WR_QOS	V_FW_RDEV_WR_QOS(1U)

#define S_FW_RDEV_WR_ORG_PROC_ASSOC	7
#define M_FW_RDEV_WR_ORG_PROC_ASSOC	0x1
#define V_FW_RDEV_WR_ORG_PROC_ASSOC(x)	((x) << S_FW_RDEV_WR_ORG_PROC_ASSOC)
#define G_FW_RDEV_WR_ORG_PROC_ASSOC(x)	\
    (((x) >> S_FW_RDEV_WR_ORG_PROC_ASSOC) & M_FW_RDEV_WR_ORG_PROC_ASSOC)
#define F_FW_RDEV_WR_ORG_PROC_ASSOC	V_FW_RDEV_WR_ORG_PROC_ASSOC(1U)

#define S_FW_RDEV_WR_RSP_PROC_ASSOC	6
#define M_FW_RDEV_WR_RSP_PROC_ASSOC	0x1
#define V_FW_RDEV_WR_RSP_PROC_ASSOC(x)	((x) << S_FW_RDEV_WR_RSP_PROC_ASSOC)
#define G_FW_RDEV_WR_RSP_PROC_ASSOC(x)	\
    (((x) >> S_FW_RDEV_WR_RSP_PROC_ASSOC) & M_FW_RDEV_WR_RSP_PROC_ASSOC)
#define F_FW_RDEV_WR_RSP_PROC_ASSOC	V_FW_RDEV_WR_RSP_PROC_ASSOC(1U)

#define S_FW_RDEV_WR_IMAGE_PAIR	5
#define M_FW_RDEV_WR_IMAGE_PAIR	0x1
#define V_FW_RDEV_WR_IMAGE_PAIR(x)	((x) << S_FW_RDEV_WR_IMAGE_PAIR)
#define G_FW_RDEV_WR_IMAGE_PAIR(x)	\
    (((x) >> S_FW_RDEV_WR_IMAGE_PAIR) & M_FW_RDEV_WR_IMAGE_PAIR)
#define F_FW_RDEV_WR_IMAGE_PAIR	V_FW_RDEV_WR_IMAGE_PAIR(1U)

#define S_FW_RDEV_WR_ACC_RSP_CODE	0
#define M_FW_RDEV_WR_ACC_RSP_CODE	0x1f
#define V_FW_RDEV_WR_ACC_RSP_CODE(x)	((x) << S_FW_RDEV_WR_ACC_RSP_CODE)
#define G_FW_RDEV_WR_ACC_RSP_CODE(x)	\
    (((x) >> S_FW_RDEV_WR_ACC_RSP_CODE) & M_FW_RDEV_WR_ACC_RSP_CODE)

#define S_FW_RDEV_WR_ENH_DISC	7
#define M_FW_RDEV_WR_ENH_DISC	0x1
#define V_FW_RDEV_WR_ENH_DISC(x)	((x) << S_FW_RDEV_WR_ENH_DISC)
#define G_FW_RDEV_WR_ENH_DISC(x)	\
    (((x) >> S_FW_RDEV_WR_ENH_DISC) & M_FW_RDEV_WR_ENH_DISC)
#define F_FW_RDEV_WR_ENH_DISC	V_FW_RDEV_WR_ENH_DISC(1U)

#define S_FW_RDEV_WR_REC	6
#define M_FW_RDEV_WR_REC	0x1
#define V_FW_RDEV_WR_REC(x)	((x) << S_FW_RDEV_WR_REC)
#define G_FW_RDEV_WR_REC(x)	(((x) >> S_FW_RDEV_WR_REC) & M_FW_RDEV_WR_REC)
#define F_FW_RDEV_WR_REC	V_FW_RDEV_WR_REC(1U)

#define S_FW_RDEV_WR_TASK_RETRY_ID	5
#define M_FW_RDEV_WR_TASK_RETRY_ID	0x1
#define V_FW_RDEV_WR_TASK_RETRY_ID(x)	((x) << S_FW_RDEV_WR_TASK_RETRY_ID)
#define G_FW_RDEV_WR_TASK_RETRY_ID(x)	\
    (((x) >> S_FW_RDEV_WR_TASK_RETRY_ID) & M_FW_RDEV_WR_TASK_RETRY_ID)
#define F_FW_RDEV_WR_TASK_RETRY_ID	V_FW_RDEV_WR_TASK_RETRY_ID(1U)

#define S_FW_RDEV_WR_RETRY	4
#define M_FW_RDEV_WR_RETRY	0x1
#define V_FW_RDEV_WR_RETRY(x)	((x) << S_FW_RDEV_WR_RETRY)
#define G_FW_RDEV_WR_RETRY(x)	\
    (((x) >> S_FW_RDEV_WR_RETRY) & M_FW_RDEV_WR_RETRY)
#define F_FW_RDEV_WR_RETRY	V_FW_RDEV_WR_RETRY(1U)

#define S_FW_RDEV_WR_CONF_CMPL	3
#define M_FW_RDEV_WR_CONF_CMPL	0x1
#define V_FW_RDEV_WR_CONF_CMPL(x)	((x) << S_FW_RDEV_WR_CONF_CMPL)
#define G_FW_RDEV_WR_CONF_CMPL(x)	\
    (((x) >> S_FW_RDEV_WR_CONF_CMPL) & M_FW_RDEV_WR_CONF_CMPL)
#define F_FW_RDEV_WR_CONF_CMPL	V_FW_RDEV_WR_CONF_CMPL(1U)

#define S_FW_RDEV_WR_DATA_OVLY	2
#define M_FW_RDEV_WR_DATA_OVLY	0x1
#define V_FW_RDEV_WR_DATA_OVLY(x)	((x) << S_FW_RDEV_WR_DATA_OVLY)
#define G_FW_RDEV_WR_DATA_OVLY(x)	\
    (((x) >> S_FW_RDEV_WR_DATA_OVLY) & M_FW_RDEV_WR_DATA_OVLY)
#define F_FW_RDEV_WR_DATA_OVLY	V_FW_RDEV_WR_DATA_OVLY(1U)

#define S_FW_RDEV_WR_INI	1
#define M_FW_RDEV_WR_INI	0x1
#define V_FW_RDEV_WR_INI(x)	((x) << S_FW_RDEV_WR_INI)
#define G_FW_RDEV_WR_INI(x)	(((x) >> S_FW_RDEV_WR_INI) & M_FW_RDEV_WR_INI)
#define F_FW_RDEV_WR_INI	V_FW_RDEV_WR_INI(1U)

#define S_FW_RDEV_WR_TGT	0
#define M_FW_RDEV_WR_TGT	0x1
#define V_FW_RDEV_WR_TGT(x)	((x) << S_FW_RDEV_WR_TGT)
#define G_FW_RDEV_WR_TGT(x)	(((x) >> S_FW_RDEV_WR_TGT) & M_FW_RDEV_WR_TGT)
#define F_FW_RDEV_WR_TGT	V_FW_RDEV_WR_TGT(1U)

struct fw_foiscsi_node_wr {
	__be32	op_to_immdlen;
	__be32	no_sess_recv_to_len16;
	__u64	cookie;
	__u8	subop;
	__u8	status;
	__u8	alias_len;
	__u8	iqn_len;
	__be32	node_flowid;
	__be16	nodeid;
	__be16	login_retry;
	__be16	retry_timeout;
	__be16	r3;
	__u8	iqn[224];
	__u8	alias[224];
	__be32	isid_tval_to_isid_cval;
};

#define S_FW_FOISCSI_NODE_WR_IMMDLEN	0
#define M_FW_FOISCSI_NODE_WR_IMMDLEN	0xffff
#define V_FW_FOISCSI_NODE_WR_IMMDLEN(x)	((x) << S_FW_FOISCSI_NODE_WR_IMMDLEN)
#define G_FW_FOISCSI_NODE_WR_IMMDLEN(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_IMMDLEN) & M_FW_FOISCSI_NODE_WR_IMMDLEN)

#define S_FW_FOISCSI_NODE_WR_NO_SESS_RECV	28
#define M_FW_FOISCSI_NODE_WR_NO_SESS_RECV	0x1
#define V_FW_FOISCSI_NODE_WR_NO_SESS_RECV(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_NO_SESS_RECV)
#define G_FW_FOISCSI_NODE_WR_NO_SESS_RECV(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_NO_SESS_RECV) & \
     M_FW_FOISCSI_NODE_WR_NO_SESS_RECV)
#define F_FW_FOISCSI_NODE_WR_NO_SESS_RECV	\
    V_FW_FOISCSI_NODE_WR_NO_SESS_RECV(1U)

#define S_FW_FOISCSI_NODE_WR_ISID_TVAL	30
#define M_FW_FOISCSI_NODE_WR_ISID_TVAL	0x3
#define V_FW_FOISCSI_NODE_WR_ISID_TVAL(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_ISID_TVAL)
#define G_FW_FOISCSI_NODE_WR_ISID_TVAL(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_TVAL) & M_FW_FOISCSI_NODE_WR_ISID_TVAL)

#define S_FW_FOISCSI_NODE_WR_ISID_AVAL	24
#define M_FW_FOISCSI_NODE_WR_ISID_AVAL	0x3f
#define V_FW_FOISCSI_NODE_WR_ISID_AVAL(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_ISID_AVAL)
#define G_FW_FOISCSI_NODE_WR_ISID_AVAL(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_AVAL) & M_FW_FOISCSI_NODE_WR_ISID_AVAL)

#define S_FW_FOISCSI_NODE_WR_ISID_BVAL	8
#define M_FW_FOISCSI_NODE_WR_ISID_BVAL	0xffff
#define V_FW_FOISCSI_NODE_WR_ISID_BVAL(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_ISID_BVAL)
#define G_FW_FOISCSI_NODE_WR_ISID_BVAL(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_BVAL) & M_FW_FOISCSI_NODE_WR_ISID_BVAL)

#define S_FW_FOISCSI_NODE_WR_ISID_CVAL	0
#define M_FW_FOISCSI_NODE_WR_ISID_CVAL	0xff
#define V_FW_FOISCSI_NODE_WR_ISID_CVAL(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_ISID_CVAL)
#define G_FW_FOISCSI_NODE_WR_ISID_CVAL(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_CVAL) & M_FW_FOISCSI_NODE_WR_ISID_CVAL)
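
/*
 * Illustrative sketch only: isid_tval_to_isid_cval in fw_foiscsi_node_wr
 * carries the four ISID components packed with the macros above; the helper
 * name is hypothetical and the caller stores the result with htobe32().
 */
#if 0	/* example only */
static inline uint32_t
example_isid(u_int t, u_int a, u_int b, u_int c)
{
	return (V_FW_FOISCSI_NODE_WR_ISID_TVAL(t) |
	    V_FW_FOISCSI_NODE_WR_ISID_AVAL(a) |
	    V_FW_FOISCSI_NODE_WR_ISID_BVAL(b) |
	    V_FW_FOISCSI_NODE_WR_ISID_CVAL(c));
}
#endif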

struct fw_foiscsi_ctrl_wr {
	__be32	op_to_no_fin;
	__be32	flowid_len16;
	__u64	cookie;
	__u8	subop;
	__u8	status;
	__u8	ctrl_state;
	__u8	io_state;
	__be32	node_id;
	__be32	ctrl_id;
	__be32	io_id;
	struct fw_foiscsi_sess_attr {
		__be32	sess_type_to_erl;
		__be16	max_conn;
		__be16	max_r2t;
		__be16	time2wait;
		__be16	time2retain;
		__be32	max_burst;
		__be32	first_burst;
		__be32	r1;
	} sess_attr;
	struct fw_foiscsi_conn_attr {
		__be32	hdigest_to_tcp_ws_en;
		__be32	max_rcv_dsl;
		__be32	ping_tmo;
		__be16	dst_port;
		__be16	src_port;
		union fw_foiscsi_conn_attr_addr {
			struct fw_foiscsi_conn_attr_ipv6 {
				__be64	dst_addr[2];
				__be64	src_addr[2];
			} ipv6_addr;
			struct fw_foiscsi_conn_attr_ipv4 {
				__be32	dst_addr;
				__be32	src_addr;
			} ipv4_addr;
		} u;
	} conn_attr;
	__u8	tgt_name_len;
	__u8	r3[7];
	__u8	tgt_name[FW_FOISCSI_NAME_MAX_LEN];
};

#define S_FW_FOISCSI_CTRL_WR_PORTID	1
#define M_FW_FOISCSI_CTRL_WR_PORTID	0x7
#define V_FW_FOISCSI_CTRL_WR_PORTID(x)	((x) << S_FW_FOISCSI_CTRL_WR_PORTID)
#define G_FW_FOISCSI_CTRL_WR_PORTID(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_PORTID) & M_FW_FOISCSI_CTRL_WR_PORTID)

#define S_FW_FOISCSI_CTRL_WR_NO_FIN	0
#define M_FW_FOISCSI_CTRL_WR_NO_FIN	0x1
#define V_FW_FOISCSI_CTRL_WR_NO_FIN(x)	((x) << S_FW_FOISCSI_CTRL_WR_NO_FIN)
#define G_FW_FOISCSI_CTRL_WR_NO_FIN(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_NO_FIN) & M_FW_FOISCSI_CTRL_WR_NO_FIN)
#define F_FW_FOISCSI_CTRL_WR_NO_FIN	V_FW_FOISCSI_CTRL_WR_NO_FIN(1U)

#define S_FW_FOISCSI_CTRL_WR_SESS_TYPE	30
#define M_FW_FOISCSI_CTRL_WR_SESS_TYPE	0x3
#define V_FW_FOISCSI_CTRL_WR_SESS_TYPE(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_SESS_TYPE)
#define G_FW_FOISCSI_CTRL_WR_SESS_TYPE(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_SESS_TYPE) & M_FW_FOISCSI_CTRL_WR_SESS_TYPE)

#define S_FW_FOISCSI_CTRL_WR_SEQ_INORDER	29
#define M_FW_FOISCSI_CTRL_WR_SEQ_INORDER	0x1
#define V_FW_FOISCSI_CTRL_WR_SEQ_INORDER(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_SEQ_INORDER)
#define G_FW_FOISCSI_CTRL_WR_SEQ_INORDER(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_SEQ_INORDER) & \
     M_FW_FOISCSI_CTRL_WR_SEQ_INORDER)
#define F_FW_FOISCSI_CTRL_WR_SEQ_INORDER	\
    V_FW_FOISCSI_CTRL_WR_SEQ_INORDER(1U)

#define S_FW_FOISCSI_CTRL_WR_PDU_INORDER	28
#define M_FW_FOISCSI_CTRL_WR_PDU_INORDER	0x1
#define V_FW_FOISCSI_CTRL_WR_PDU_INORDER(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_PDU_INORDER)
#define G_FW_FOISCSI_CTRL_WR_PDU_INORDER(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_PDU_INORDER) & \
     M_FW_FOISCSI_CTRL_WR_PDU_INORDER)
#define F_FW_FOISCSI_CTRL_WR_PDU_INORDER	\
    V_FW_FOISCSI_CTRL_WR_PDU_INORDER(1U)

#define S_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN	27
#define M_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN	0x1
#define V_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN)
#define G_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN) & \
     M_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN)
#define F_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN	\
    V_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN(1U)

#define S_FW_FOISCSI_CTRL_WR_INIT_R2T_EN	26
#define M_FW_FOISCSI_CTRL_WR_INIT_R2T_EN	0x1
#define V_FW_FOISCSI_CTRL_WR_INIT_R2T_EN(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_INIT_R2T_EN)
#define G_FW_FOISCSI_CTRL_WR_INIT_R2T_EN(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_INIT_R2T_EN) & \
     M_FW_FOISCSI_CTRL_WR_INIT_R2T_EN)
#define F_FW_FOISCSI_CTRL_WR_INIT_R2T_EN	\
    V_FW_FOISCSI_CTRL_WR_INIT_R2T_EN(1U)

#define S_FW_FOISCSI_CTRL_WR_ERL	24
#define M_FW_FOISCSI_CTRL_WR_ERL	0x3
#define V_FW_FOISCSI_CTRL_WR_ERL(x)	((x) << S_FW_FOISCSI_CTRL_WR_ERL)
#define G_FW_FOISCSI_CTRL_WR_ERL(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_ERL) & M_FW_FOISCSI_CTRL_WR_ERL)

#define S_FW_FOISCSI_CTRL_WR_HDIGEST	30
#define M_FW_FOISCSI_CTRL_WR_HDIGEST	0x3
#define V_FW_FOISCSI_CTRL_WR_HDIGEST(x)	((x) << S_FW_FOISCSI_CTRL_WR_HDIGEST)
#define G_FW_FOISCSI_CTRL_WR_HDIGEST(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_HDIGEST) & M_FW_FOISCSI_CTRL_WR_HDIGEST)

#define S_FW_FOISCSI_CTRL_WR_DDIGEST	28
#define M_FW_FOISCSI_CTRL_WR_DDIGEST	0x3
#define V_FW_FOISCSI_CTRL_WR_DDIGEST(x)	((x) << S_FW_FOISCSI_CTRL_WR_DDIGEST)
#define G_FW_FOISCSI_CTRL_WR_DDIGEST(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_DDIGEST) & M_FW_FOISCSI_CTRL_WR_DDIGEST)

#define S_FW_FOISCSI_CTRL_WR_AUTH_METHOD	25
#define M_FW_FOISCSI_CTRL_WR_AUTH_METHOD	0x7
#define V_FW_FOISCSI_CTRL_WR_AUTH_METHOD(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_AUTH_METHOD)
#define G_FW_FOISCSI_CTRL_WR_AUTH_METHOD(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_AUTH_METHOD) & \
     M_FW_FOISCSI_CTRL_WR_AUTH_METHOD)

#define S_FW_FOISCSI_CTRL_WR_AUTH_POLICY	23
#define M_FW_FOISCSI_CTRL_WR_AUTH_POLICY	0x3
#define V_FW_FOISCSI_CTRL_WR_AUTH_POLICY(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_AUTH_POLICY)
#define G_FW_FOISCSI_CTRL_WR_AUTH_POLICY(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_AUTH_POLICY) & \
     M_FW_FOISCSI_CTRL_WR_AUTH_POLICY)

#define S_FW_FOISCSI_CTRL_WR_DDP_PGSZ	21
#define M_FW_FOISCSI_CTRL_WR_DDP_PGSZ	0x3
#define V_FW_FOISCSI_CTRL_WR_DDP_PGSZ(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_DDP_PGSZ)
#define G_FW_FOISCSI_CTRL_WR_DDP_PGSZ(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_DDP_PGSZ) & M_FW_FOISCSI_CTRL_WR_DDP_PGSZ)

#define S_FW_FOISCSI_CTRL_WR_IPV6	20
#define M_FW_FOISCSI_CTRL_WR_IPV6	0x1
#define V_FW_FOISCSI_CTRL_WR_IPV6(x)	((x) << S_FW_FOISCSI_CTRL_WR_IPV6)
#define G_FW_FOISCSI_CTRL_WR_IPV6(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_IPV6) & M_FW_FOISCSI_CTRL_WR_IPV6)
#define F_FW_FOISCSI_CTRL_WR_IPV6	V_FW_FOISCSI_CTRL_WR_IPV6(1U)

#define S_FW_FOISCSI_CTRL_WR_DDP_PGIDX	16
#define M_FW_FOISCSI_CTRL_WR_DDP_PGIDX	0xf
#define V_FW_FOISCSI_CTRL_WR_DDP_PGIDX(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_DDP_PGIDX)
#define G_FW_FOISCSI_CTRL_WR_DDP_PGIDX(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_DDP_PGIDX) & M_FW_FOISCSI_CTRL_WR_DDP_PGIDX)

#define S_FW_FOISCSI_CTRL_WR_TCP_WS	12
#define M_FW_FOISCSI_CTRL_WR_TCP_WS	0xf
#define V_FW_FOISCSI_CTRL_WR_TCP_WS(x)	((x) << S_FW_FOISCSI_CTRL_WR_TCP_WS)
#define G_FW_FOISCSI_CTRL_WR_TCP_WS(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_TCP_WS) & M_FW_FOISCSI_CTRL_WR_TCP_WS)

#define S_FW_FOISCSI_CTRL_WR_TCP_WS_EN	11
#define M_FW_FOISCSI_CTRL_WR_TCP_WS_EN	0x1
#define V_FW_FOISCSI_CTRL_WR_TCP_WS_EN(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_TCP_WS_EN)
#define G_FW_FOISCSI_CTRL_WR_TCP_WS_EN(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_TCP_WS_EN) & M_FW_FOISCSI_CTRL_WR_TCP_WS_EN)
#define F_FW_FOISCSI_CTRL_WR_TCP_WS_EN	V_FW_FOISCSI_CTRL_WR_TCP_WS_EN(1U)
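
/*
 * Illustrative sketch only: sess_attr.sess_type_to_erl and
 * conn_attr.hdigest_to_tcp_ws_en are each built by OR-ing the V_/F_ forms
 * above.  The helper name and the particular values chosen here are made up
 * for the example.
 */
#if 0	/* example only */
static inline void
example_fill_ctrl_attrs(struct fw_foiscsi_ctrl_wr *wr)
{
	wr->sess_attr.sess_type_to_erl = htobe32(
	    V_FW_FOISCSI_CTRL_WR_SESS_TYPE(FW_FOISCSI_SESSION_TYPE_NORMAL) |
	    F_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN | V_FW_FOISCSI_CTRL_WR_ERL(0));
	wr->conn_attr.hdigest_to_tcp_ws_en = htobe32(
	    V_FW_FOISCSI_CTRL_WR_HDIGEST(FW_FOISCSI_DIGEST_TYPE_NONE) |
	    V_FW_FOISCSI_CTRL_WR_DDIGEST(FW_FOISCSI_DIGEST_TYPE_NONE) |
	    V_FW_FOISCSI_CTRL_WR_AUTH_METHOD(FW_FOISCSI_AUTH_METHOD_NONE) |
	    F_FW_FOISCSI_CTRL_WR_TCP_WS_EN);
}
#endif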

struct fw_foiscsi_chap_wr {
	__be32	op_to_kv_flag;
	__be32	flowid_len16;
	__u64	cookie;
	__u8	status;
	union fw_foiscsi_len {
		struct fw_foiscsi_chap_lens {
			__u8	id_len;
			__u8	sec_len;
		} chapl;
		struct fw_foiscsi_vend_kv_lens {
			__u8	key_len;
			__u8	val_len;
		} vend_kvl;
	} lenu;
	__u8	node_type;
	__be16	node_id;
	__u8	r3[2];
	union fw_foiscsi_chap_vend {
		struct fw_foiscsi_chap {
			__u8	chap_id[224];
			__u8	chap_sec[128];
		} chap;
		struct fw_foiscsi_vend_kv {
			__u8	vend_key[64];
			__u8	vend_val[256];
		} vend_kv;
	} u;
};

#define S_FW_FOISCSI_CHAP_WR_KV_FLAG	20
#define M_FW_FOISCSI_CHAP_WR_KV_FLAG	0x1
#define V_FW_FOISCSI_CHAP_WR_KV_FLAG(x)	((x) << S_FW_FOISCSI_CHAP_WR_KV_FLAG)
#define G_FW_FOISCSI_CHAP_WR_KV_FLAG(x)	\
    (((x) >> S_FW_FOISCSI_CHAP_WR_KV_FLAG) & M_FW_FOISCSI_CHAP_WR_KV_FLAG)
#define F_FW_FOISCSI_CHAP_WR_KV_FLAG	V_FW_FOISCSI_CHAP_WR_KV_FLAG(1U)

/******************************************************************************
 *  C O i S C S I   W O R K   R E Q U E S T S
 ********************************************/

enum fw_chnet_addr_type {
	FW_CHNET_ADDR_TYPE_NONE = 0,
	FW_CHNET_ADDR_TYPE_IPV4,
	FW_CHNET_ADDR_TYPE_IPV6,
};

enum fw_msg_wr_type {
	FW_MSG_WR_TYPE_RPL = 0,
	FW_MSG_WR_TYPE_ERR,
	FW_MSG_WR_TYPE_PLD,
};

struct fw_coiscsi_tgt_wr {
	__be32	op_compl;
	__be32	flowid_len16;
	__u64	cookie;
	__u8	subop;
	__u8	status;
	__be16	r4;
	__be32	flags;
	struct fw_coiscsi_tgt_conn_attr {
		__be32	in_tid;
		__be16	in_port;
		__u8	in_type;
		__u8	r6;
		union fw_coiscsi_tgt_conn_attr_addr {
			struct fw_coiscsi_tgt_conn_attr_in_addr {
				__be32	addr;
				__be32	r7;
				__be32	r8[2];
			} in_addr;
			struct fw_coiscsi_tgt_conn_attr_in_addr6 {
				__be64	addr[2];
			} in_addr6;
		} u;
	} conn_attr;
};

#define S_FW_COISCSI_TGT_WR_PORTID	0
#define M_FW_COISCSI_TGT_WR_PORTID	0x7
#define V_FW_COISCSI_TGT_WR_PORTID(x)	((x) << S_FW_COISCSI_TGT_WR_PORTID)
#define G_FW_COISCSI_TGT_WR_PORTID(x)	\
    (((x) >> S_FW_COISCSI_TGT_WR_PORTID) & M_FW_COISCSI_TGT_WR_PORTID)

struct fw_coiscsi_tgt_conn_wr {
	__be32	op_compl;
	__be32	flowid_len16;
	__u64	cookie;
	__u8	subop;
	__u8	status;
	__be16	iq_id;
	__be32	in_stid;
	__be32	io_id;
	__be32	flags_fin;
	union {
		struct fw_coiscsi_tgt_conn_tcp {
			__be16	in_sport;
			__be16	in_dport;
			__u8	wscale_wsen;
			__u8	r4[3];
			union fw_coiscsi_tgt_conn_tcp_addr {
				struct fw_coiscsi_tgt_conn_tcp_in_addr {
					__be32	saddr;
					__be32	daddr;
				} in_addr;
				struct fw_coiscsi_tgt_conn_tcp_in_addr6 {
					__be64	saddr[2];
					__be64	daddr[2];
				} in_addr6;
			} u;
		} conn_tcp;
		struct fw_coiscsi_tgt_conn_stats {
			__be32	ddp_reqs;
			__be32	ddp_cmpls;
			__be16	ddp_aborts;
			__be16	ddp_bps;
		} stats;
	} u;
	struct fw_coiscsi_tgt_conn_iscsi {
		__be32	hdigest_to_ddp_pgsz;
		__be32	tgt_id;
		__be16	max_r2t;
		__be16	r5;
		__be32	max_burst;
		__be32	max_rdsl;
		__be32	max_tdsl;
		__be32	cur_sn;
		__be32	r6;
	} conn_iscsi;
};

#define S_FW_COISCSI_TGT_CONN_WR_PORTID	0
#define M_FW_COISCSI_TGT_CONN_WR_PORTID	0x7
#define V_FW_COISCSI_TGT_CONN_WR_PORTID(x)	\
    ((x) << S_FW_COISCSI_TGT_CONN_WR_PORTID)
#define G_FW_COISCSI_TGT_CONN_WR_PORTID(x)	\
    (((x) >> S_FW_COISCSI_TGT_CONN_WR_PORTID) & \
     M_FW_COISCSI_TGT_CONN_WR_PORTID)

#define S_FW_COISCSI_TGT_CONN_WR_FIN	0
#define M_FW_COISCSI_TGT_CONN_WR_FIN	0x1
#define V_FW_COISCSI_TGT_CONN_WR_FIN(x)	((x) << S_FW_COISCSI_TGT_CONN_WR_FIN)
#define G_FW_COISCSI_TGT_CONN_WR_FIN(x)	\
    (((x) >> S_FW_COISCSI_TGT_CONN_WR_FIN) & M_FW_COISCSI_TGT_CONN_WR_FIN)
#define F_FW_COISCSI_TGT_CONN_WR_FIN	V_FW_COISCSI_TGT_CONN_WR_FIN(1U)

#define S_FW_COISCSI_TGT_CONN_WR_WSCALE	1
#define M_FW_COISCSI_TGT_CONN_WR_WSCALE	0xf
#define V_FW_COISCSI_TGT_CONN_WR_WSCALE(x)	\
    ((x) << S_FW_COISCSI_TGT_CONN_WR_WSCALE)
#define G_FW_COISCSI_TGT_CONN_WR_WSCALE(x)	\
    (((x) >> S_FW_COISCSI_TGT_CONN_WR_WSCALE) & \
     M_FW_COISCSI_TGT_CONN_WR_WSCALE)

#define S_FW_COISCSI_TGT_CONN_WR_WSEN	0
#define M_FW_COISCSI_TGT_CONN_WR_WSEN	0x1
#define V_FW_COISCSI_TGT_CONN_WR_WSEN(x)	\
    ((x) << S_FW_COISCSI_TGT_CONN_WR_WSEN)
#define G_FW_COISCSI_TGT_CONN_WR_WSEN(x)	\
    (((x) >> S_FW_COISCSI_TGT_CONN_WR_WSEN) & M_FW_COISCSI_TGT_CONN_WR_WSEN)
#define F_FW_COISCSI_TGT_CONN_WR_WSEN	V_FW_COISCSI_TGT_CONN_WR_WSEN(1U)

struct fw_coiscsi_tgt_xmit_wr {
	__be32	op_to_immdlen;
	union {
		struct cmpl_stat {
			__be32	cmpl_status_pkd;
		} cs;
		struct flowid_len {
			__be32	flowid_len16;
		} fllen;
	} u;
	__u64	cookie;
	__be16	iq_id;
	__be16	r3;
	__be32	pz_off;
	__be32	t_xfer_len;
	union {
		__be32	tag;
		__be32	datasn;
		__be32	ddp_status;
	} cu;
};

#define S_FW_COISCSI_TGT_XMIT_WR_DDGST	23
#define M_FW_COISCSI_TGT_XMIT_WR_DDGST	0x1
#define V_FW_COISCSI_TGT_XMIT_WR_DDGST(x)	\
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_DDGST)
#define G_FW_COISCSI_TGT_XMIT_WR_DDGST(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_DDGST) & M_FW_COISCSI_TGT_XMIT_WR_DDGST)
#define F_FW_COISCSI_TGT_XMIT_WR_DDGST	V_FW_COISCSI_TGT_XMIT_WR_DDGST(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_HDGST	22
#define M_FW_COISCSI_TGT_XMIT_WR_HDGST	0x1
#define V_FW_COISCSI_TGT_XMIT_WR_HDGST(x)	\
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_HDGST)
#define G_FW_COISCSI_TGT_XMIT_WR_HDGST(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_HDGST) & M_FW_COISCSI_TGT_XMIT_WR_HDGST)
#define F_FW_COISCSI_TGT_XMIT_WR_HDGST	V_FW_COISCSI_TGT_XMIT_WR_HDGST(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_DDP	20
#define M_FW_COISCSI_TGT_XMIT_WR_DDP	0x1
#define V_FW_COISCSI_TGT_XMIT_WR_DDP(x)	((x) << S_FW_COISCSI_TGT_XMIT_WR_DDP)
#define G_FW_COISCSI_TGT_XMIT_WR_DDP(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_DDP) & M_FW_COISCSI_TGT_XMIT_WR_DDP)
#define F_FW_COISCSI_TGT_XMIT_WR_DDP	V_FW_COISCSI_TGT_XMIT_WR_DDP(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_ABORT	19
#define M_FW_COISCSI_TGT_XMIT_WR_ABORT	0x1
#define V_FW_COISCSI_TGT_XMIT_WR_ABORT(x)	\
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_ABORT)
#define G_FW_COISCSI_TGT_XMIT_WR_ABORT(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_ABORT) & M_FW_COISCSI_TGT_XMIT_WR_ABORT)
#define F_FW_COISCSI_TGT_XMIT_WR_ABORT	V_FW_COISCSI_TGT_XMIT_WR_ABORT(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_FINAL	18
#define M_FW_COISCSI_TGT_XMIT_WR_FINAL	0x1
#define V_FW_COISCSI_TGT_XMIT_WR_FINAL(x)	\
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_FINAL)
#define G_FW_COISCSI_TGT_XMIT_WR_FINAL(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_FINAL) & M_FW_COISCSI_TGT_XMIT_WR_FINAL)
#define F_FW_COISCSI_TGT_XMIT_WR_FINAL	V_FW_COISCSI_TGT_XMIT_WR_FINAL(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_PADLEN	16
#define M_FW_COISCSI_TGT_XMIT_WR_PADLEN	0x3
#define V_FW_COISCSI_TGT_XMIT_WR_PADLEN(x)	\
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_PADLEN)
#define G_FW_COISCSI_TGT_XMIT_WR_PADLEN(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_PADLEN) & \
     M_FW_COISCSI_TGT_XMIT_WR_PADLEN)

#define S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN	15
#define M_FW_COISCSI_TGT_XMIT_WR_INCSTATSN	0x1
#define V_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(x)	\
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN)
#define G_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN) & \
     M_FW_COISCSI_TGT_XMIT_WR_INCSTATSN)
#define F_FW_COISCSI_TGT_XMIT_WR_INCSTATSN	\
    V_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN	0
#define M_FW_COISCSI_TGT_XMIT_WR_IMMDLEN	0xff
#define V_FW_COISCSI_TGT_XMIT_WR_IMMDLEN(x)	\
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN)
#define G_FW_COISCSI_TGT_XMIT_WR_IMMDLEN(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN) & \
     M_FW_COISCSI_TGT_XMIT_WR_IMMDLEN)

#define S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS	8
#define M_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS	0xff
#define V_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS(x)	\
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS)
#define G_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS(x)	\
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS) & \
     M_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS)
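
/*
 * Illustrative sketch only: op_to_immdlen of fw_coiscsi_tgt_xmit_wr mixes
 * per-PDU control bits with the immediate-data length (the WR opcode also
 * lives in this word, omitted here).  The helper name and values are made
 * up for the example.
 */
#if 0	/* example only */
static inline uint32_t
example_coiscsi_xmit_flags(u_int padlen)
{
	/* Final, DDP-eligible transmit with no immediate data. */
	return (F_FW_COISCSI_TGT_XMIT_WR_DDP | F_FW_COISCSI_TGT_XMIT_WR_FINAL |
	    V_FW_COISCSI_TGT_XMIT_WR_PADLEN(padlen) |
	    V_FW_COISCSI_TGT_XMIT_WR_IMMDLEN(0));
}
#endif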

struct fw_coiscsi_stats_wr {
	__be32	op_compl;
	__be32	flowid_len16;
	__u64	cookie;
	__u8	subop;
	__u8	status;
	union fw_coiscsi_stats {
		struct fw_coiscsi_resource {
			__u8	num_ipv4_tgt;
			__u8	num_ipv6_tgt;
			__be16	num_l2t_entries;
			__be16	num_csocks;
			__be16	num_tasks;
			__be16	num_ppods_zone[11];
			__be32	num_bufll64;
			__u8	r2[12];
		} rsrc;
	} u;
};

#define S_FW_COISCSI_STATS_WR_PORTID	0
#define M_FW_COISCSI_STATS_WR_PORTID	0x7
#define V_FW_COISCSI_STATS_WR_PORTID(x)	((x) << S_FW_COISCSI_STATS_WR_PORTID)
#define G_FW_COISCSI_STATS_WR_PORTID(x)	\
    (((x) >> S_FW_COISCSI_STATS_WR_PORTID) & M_FW_COISCSI_STATS_WR_PORTID)

struct fw_isns_wr {
	__be32	op_compl;
	__be32	flowid_len16;
	__u64	cookie;
	__u8	subop;
	__u8	status;
	__be16	iq_id;
	__be16	vlanid;
	__be16	r4;
	struct fw_tcp_conn_attr {
		__be32	in_tid;
		__be16	in_port;
		__u8	in_type;
		__u8	r6;
		union fw_tcp_conn_attr_addr {
			struct fw_tcp_conn_attr_in_addr {
				__be32	addr;
				__be32	r7;
				__be32	r8[2];
			} in_addr;
			struct fw_tcp_conn_attr_in_addr6 {
				__be64	addr[2];
			} in_addr6;
		} u;
	} conn_attr;
};

#define S_FW_ISNS_WR_PORTID	0
#define M_FW_ISNS_WR_PORTID	0x7
#define V_FW_ISNS_WR_PORTID(x)	((x) << S_FW_ISNS_WR_PORTID)
#define G_FW_ISNS_WR_PORTID(x)	\
    (((x) >> S_FW_ISNS_WR_PORTID) & M_FW_ISNS_WR_PORTID)

struct fw_isns_xmit_wr {
	__be32	op_to_immdlen;
	__be32	flowid_len16;
	__u64	cookie;
	__be16	iq_id;
	__be16	r4;
	__be32	xfer_len;
	__be64	r5;
};

#define S_FW_ISNS_XMIT_WR_IMMDLEN	0
#define M_FW_ISNS_XMIT_WR_IMMDLEN	0xff
#define V_FW_ISNS_XMIT_WR_IMMDLEN(x)	((x) << S_FW_ISNS_XMIT_WR_IMMDLEN)
#define G_FW_ISNS_XMIT_WR_IMMDLEN(x)	\
    (((x) >> S_FW_ISNS_XMIT_WR_IMMDLEN) & M_FW_ISNS_XMIT_WR_IMMDLEN)

/******************************************************************************
 *  F O F C O E   W O R K   R E Q U E S T s
 *******************************************/

struct fw_fcoe_els_ct_wr {
	__be32	op_immdlen;
	__be32	flowid_len16;
	__be64	cookie;
	__be16	iqid;
	__u8	tmo_val;
	__u8	els_ct_type;
	__u8	ctl_pri;
	__u8	cp_en_class;
	__be16	xfer_cnt;
	__u8	fl_to_sp;
	__u8	l_id[3];
	__u8	r5;
	__u8	r_id[3];
	__be64	rsp_dmaaddr;
	__be32	rsp_dmalen;
	__be32	r6;
};

#define S_FW_FCOE_ELS_CT_WR_OPCODE	24
#define M_FW_FCOE_ELS_CT_WR_OPCODE	0xff
#define V_FW_FCOE_ELS_CT_WR_OPCODE(x)	((x) << S_FW_FCOE_ELS_CT_WR_OPCODE)
#define G_FW_FCOE_ELS_CT_WR_OPCODE(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_OPCODE) & M_FW_FCOE_ELS_CT_WR_OPCODE)

#define S_FW_FCOE_ELS_CT_WR_IMMDLEN	0
#define M_FW_FCOE_ELS_CT_WR_IMMDLEN	0xff
#define V_FW_FCOE_ELS_CT_WR_IMMDLEN(x)	((x) << S_FW_FCOE_ELS_CT_WR_IMMDLEN)
#define G_FW_FCOE_ELS_CT_WR_IMMDLEN(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_IMMDLEN) & M_FW_FCOE_ELS_CT_WR_IMMDLEN)

#define S_FW_FCOE_ELS_CT_WR_FLOWID	8
#define M_FW_FCOE_ELS_CT_WR_FLOWID	0xfffff
#define V_FW_FCOE_ELS_CT_WR_FLOWID(x)	((x) << S_FW_FCOE_ELS_CT_WR_FLOWID)
#define G_FW_FCOE_ELS_CT_WR_FLOWID(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_FLOWID) & M_FW_FCOE_ELS_CT_WR_FLOWID)

#define S_FW_FCOE_ELS_CT_WR_LEN16	0
#define M_FW_FCOE_ELS_CT_WR_LEN16	0xff
#define V_FW_FCOE_ELS_CT_WR_LEN16(x)	((x) << S_FW_FCOE_ELS_CT_WR_LEN16)
#define G_FW_FCOE_ELS_CT_WR_LEN16(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_LEN16) & M_FW_FCOE_ELS_CT_WR_LEN16)

#define S_FW_FCOE_ELS_CT_WR_CP_EN	6
#define M_FW_FCOE_ELS_CT_WR_CP_EN	0x3
#define V_FW_FCOE_ELS_CT_WR_CP_EN(x)	((x) << S_FW_FCOE_ELS_CT_WR_CP_EN)
#define G_FW_FCOE_ELS_CT_WR_CP_EN(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_CP_EN) & M_FW_FCOE_ELS_CT_WR_CP_EN)

#define S_FW_FCOE_ELS_CT_WR_CLASS	4
#define M_FW_FCOE_ELS_CT_WR_CLASS	0x3
#define V_FW_FCOE_ELS_CT_WR_CLASS(x)	((x) << S_FW_FCOE_ELS_CT_WR_CLASS)
#define G_FW_FCOE_ELS_CT_WR_CLASS(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_CLASS) & M_FW_FCOE_ELS_CT_WR_CLASS)

#define S_FW_FCOE_ELS_CT_WR_FL	2
#define M_FW_FCOE_ELS_CT_WR_FL	0x1
#define V_FW_FCOE_ELS_CT_WR_FL(x)	((x) << S_FW_FCOE_ELS_CT_WR_FL)
#define G_FW_FCOE_ELS_CT_WR_FL(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_FL) & M_FW_FCOE_ELS_CT_WR_FL)
#define F_FW_FCOE_ELS_CT_WR_FL	V_FW_FCOE_ELS_CT_WR_FL(1U)

#define S_FW_FCOE_ELS_CT_WR_NPIV	1
#define M_FW_FCOE_ELS_CT_WR_NPIV	0x1
#define V_FW_FCOE_ELS_CT_WR_NPIV(x)	((x) << S_FW_FCOE_ELS_CT_WR_NPIV)
#define G_FW_FCOE_ELS_CT_WR_NPIV(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_NPIV) & M_FW_FCOE_ELS_CT_WR_NPIV)
#define F_FW_FCOE_ELS_CT_WR_NPIV	V_FW_FCOE_ELS_CT_WR_NPIV(1U)

#define S_FW_FCOE_ELS_CT_WR_SP	0
#define M_FW_FCOE_ELS_CT_WR_SP	0x1
#define V_FW_FCOE_ELS_CT_WR_SP(x)	((x) << S_FW_FCOE_ELS_CT_WR_SP)
#define G_FW_FCOE_ELS_CT_WR_SP(x)	\
    (((x) >> S_FW_FCOE_ELS_CT_WR_SP) & M_FW_FCOE_ELS_CT_WR_SP)
#define F_FW_FCOE_ELS_CT_WR_SP	V_FW_FCOE_ELS_CT_WR_SP(1U)

/******************************************************************************
 *  S C S I   W O R K   R E Q U E S T s   (FOiSCSI and FCOE unified data path)
 *****************************************************************************/

struct fw_scsi_write_wr {
	__be32	op_immdlen;
	__be32	flowid_len16;
	__be64	cookie;
	__be16	iqid;
	__u8	tmo_val;
	__u8	use_xfer_cnt;
	union fw_scsi_write_priv {
		struct fcoe_write_priv {
			__u8	ctl_pri;
			__u8	cp_en_class;
			__u8	r3_lo[2];
		} fcoe;
		struct iscsi_write_priv {
			__u8	r3[4];
		} iscsi;
	} u;
	__be32	xfer_cnt;
	__be32	ini_xfer_cnt;
	__be64	rsp_dmaaddr;
	__be32	rsp_dmalen;
	__be32	r4;
};

#define S_FW_SCSI_WRITE_WR_OPCODE	24
#define M_FW_SCSI_WRITE_WR_OPCODE	0xff
#define V_FW_SCSI_WRITE_WR_OPCODE(x)	((x) << S_FW_SCSI_WRITE_WR_OPCODE)
#define G_FW_SCSI_WRITE_WR_OPCODE(x)	\
    (((x) >> S_FW_SCSI_WRITE_WR_OPCODE) & M_FW_SCSI_WRITE_WR_OPCODE)

#define S_FW_SCSI_WRITE_WR_IMMDLEN	0
#define M_FW_SCSI_WRITE_WR_IMMDLEN	0xff
#define V_FW_SCSI_WRITE_WR_IMMDLEN(x)	((x) << S_FW_SCSI_WRITE_WR_IMMDLEN)
#define G_FW_SCSI_WRITE_WR_IMMDLEN(x)	\
    (((x) >> S_FW_SCSI_WRITE_WR_IMMDLEN) & M_FW_SCSI_WRITE_WR_IMMDLEN)

#define S_FW_SCSI_WRITE_WR_FLOWID	8
#define M_FW_SCSI_WRITE_WR_FLOWID	0xfffff
#define V_FW_SCSI_WRITE_WR_FLOWID(x)	((x) << S_FW_SCSI_WRITE_WR_FLOWID)
#define G_FW_SCSI_WRITE_WR_FLOWID(x)	\
    (((x) >> S_FW_SCSI_WRITE_WR_FLOWID) & M_FW_SCSI_WRITE_WR_FLOWID)

#define S_FW_SCSI_WRITE_WR_LEN16	0
#define M_FW_SCSI_WRITE_WR_LEN16	0xff
#define V_FW_SCSI_WRITE_WR_LEN16(x)	((x) << S_FW_SCSI_WRITE_WR_LEN16)
#define G_FW_SCSI_WRITE_WR_LEN16(x)	\
    (((x) >> S_FW_SCSI_WRITE_WR_LEN16) & M_FW_SCSI_WRITE_WR_LEN16)

#define S_FW_SCSI_WRITE_WR_CP_EN	6
#define M_FW_SCSI_WRITE_WR_CP_EN	0x3
#define V_FW_SCSI_WRITE_WR_CP_EN(x)	((x) << S_FW_SCSI_WRITE_WR_CP_EN)
#define G_FW_SCSI_WRITE_WR_CP_EN(x)	\
    (((x) >> S_FW_SCSI_WRITE_WR_CP_EN) & M_FW_SCSI_WRITE_WR_CP_EN)

#define S_FW_SCSI_WRITE_WR_CLASS	4
#define M_FW_SCSI_WRITE_WR_CLASS	0x3
#define V_FW_SCSI_WRITE_WR_CLASS(x)	((x) << S_FW_SCSI_WRITE_WR_CLASS)
#define G_FW_SCSI_WRITE_WR_CLASS(x)	\
    (((x) >> S_FW_SCSI_WRITE_WR_CLASS) & M_FW_SCSI_WRITE_WR_CLASS)

struct fw_scsi_read_wr {
	__be32	op_immdlen;
	__be32	flowid_len16;
	__be64	cookie;
	__be16	iqid;
	__u8	tmo_val;
	__u8	use_xfer_cnt;
	union fw_scsi_read_priv {
		struct fcoe_read_priv {
			__u8	ctl_pri;
			__u8	cp_en_class;
			__u8	r3_lo[2];
		} fcoe;
		struct iscsi_read_priv {
			__u8	r3[4];
		} iscsi;
	} u;
	__be32	xfer_cnt;
	__be32	ini_xfer_cnt;
	__be64	rsp_dmaaddr;
	__be32	rsp_dmalen;
	__be32	r4;
};

#define S_FW_SCSI_READ_WR_OPCODE	24
#define M_FW_SCSI_READ_WR_OPCODE	0xff
#define V_FW_SCSI_READ_WR_OPCODE(x)	((x) << S_FW_SCSI_READ_WR_OPCODE)
#define G_FW_SCSI_READ_WR_OPCODE(x)	\
    (((x) >> S_FW_SCSI_READ_WR_OPCODE) & M_FW_SCSI_READ_WR_OPCODE)

#define S_FW_SCSI_READ_WR_IMMDLEN	0
#define M_FW_SCSI_READ_WR_IMMDLEN	0xff
#define V_FW_SCSI_READ_WR_IMMDLEN(x)	((x) << S_FW_SCSI_READ_WR_IMMDLEN)
#define G_FW_SCSI_READ_WR_IMMDLEN(x)	\
    (((x) >> S_FW_SCSI_READ_WR_IMMDLEN) & M_FW_SCSI_READ_WR_IMMDLEN)

#define S_FW_SCSI_READ_WR_FLOWID	8
#define M_FW_SCSI_READ_WR_FLOWID	0xfffff
#define V_FW_SCSI_READ_WR_FLOWID(x)	((x) << S_FW_SCSI_READ_WR_FLOWID)
#define G_FW_SCSI_READ_WR_FLOWID(x)	\
    (((x) >> S_FW_SCSI_READ_WR_FLOWID) & M_FW_SCSI_READ_WR_FLOWID)

#define S_FW_SCSI_READ_WR_LEN16	0
#define M_FW_SCSI_READ_WR_LEN16	0xff
#define V_FW_SCSI_READ_WR_LEN16(x)	((x) << S_FW_SCSI_READ_WR_LEN16)
#define G_FW_SCSI_READ_WR_LEN16(x)	\
    (((x) >> S_FW_SCSI_READ_WR_LEN16) & M_FW_SCSI_READ_WR_LEN16)

#define S_FW_SCSI_READ_WR_CP_EN	6
#define M_FW_SCSI_READ_WR_CP_EN	0x3
#define V_FW_SCSI_READ_WR_CP_EN(x)	((x) << S_FW_SCSI_READ_WR_CP_EN)
#define G_FW_SCSI_READ_WR_CP_EN(x)	\
    (((x) >> S_FW_SCSI_READ_WR_CP_EN) & M_FW_SCSI_READ_WR_CP_EN)

#define S_FW_SCSI_READ_WR_CLASS	4
#define M_FW_SCSI_READ_WR_CLASS	0x3
#define V_FW_SCSI_READ_WR_CLASS(x)	((x) << S_FW_SCSI_READ_WR_CLASS)
#define G_FW_SCSI_READ_WR_CLASS(x)	\
    (((x) >> S_FW_SCSI_READ_WR_CLASS) & M_FW_SCSI_READ_WR_CLASS)

struct fw_scsi_cmd_wr {
	__be32	op_immdlen;
	__be32	flowid_len16;
	__be64	cookie;
	__be16	iqid;
	__u8	tmo_val;
	__u8	r3;
	union fw_scsi_cmd_priv {
		struct fcoe_cmd_priv {
			__u8	ctl_pri;
			__u8	cp_en_class;
			__u8	r4_lo[2];
		} fcoe;
		struct iscsi_cmd_priv {
			__u8	r4[4];
		} iscsi;
	} u;
	__u8	r5[8];
	__be64	rsp_dmaaddr;
	__be32	rsp_dmalen;
	__be32	r6;
};

#define S_FW_SCSI_CMD_WR_OPCODE	24
#define M_FW_SCSI_CMD_WR_OPCODE	0xff
#define V_FW_SCSI_CMD_WR_OPCODE(x)	((x) << S_FW_SCSI_CMD_WR_OPCODE)
#define G_FW_SCSI_CMD_WR_OPCODE(x)	\
    (((x) >> S_FW_SCSI_CMD_WR_OPCODE) & M_FW_SCSI_CMD_WR_OPCODE)

#define S_FW_SCSI_CMD_WR_IMMDLEN	0
#define M_FW_SCSI_CMD_WR_IMMDLEN	0xff
#define V_FW_SCSI_CMD_WR_IMMDLEN(x)	((x) << S_FW_SCSI_CMD_WR_IMMDLEN)
#define G_FW_SCSI_CMD_WR_IMMDLEN(x)	\
    (((x) >> S_FW_SCSI_CMD_WR_IMMDLEN) & M_FW_SCSI_CMD_WR_IMMDLEN)

#define S_FW_SCSI_CMD_WR_FLOWID	8
#define M_FW_SCSI_CMD_WR_FLOWID	0xfffff
#define V_FW_SCSI_CMD_WR_FLOWID(x)	((x) << S_FW_SCSI_CMD_WR_FLOWID)
#define G_FW_SCSI_CMD_WR_FLOWID(x)	\
    (((x) >> S_FW_SCSI_CMD_WR_FLOWID) & M_FW_SCSI_CMD_WR_FLOWID)

#define S_FW_SCSI_CMD_WR_LEN16	0
#define M_FW_SCSI_CMD_WR_LEN16	0xff
#define V_FW_SCSI_CMD_WR_LEN16(x)	((x) << S_FW_SCSI_CMD_WR_LEN16)
#define G_FW_SCSI_CMD_WR_LEN16(x)	\
    (((x) >> S_FW_SCSI_CMD_WR_LEN16) & M_FW_SCSI_CMD_WR_LEN16)

#define S_FW_SCSI_CMD_WR_CP_EN	6
#define M_FW_SCSI_CMD_WR_CP_EN	0x3
#define V_FW_SCSI_CMD_WR_CP_EN(x)	((x) << S_FW_SCSI_CMD_WR_CP_EN)
#define G_FW_SCSI_CMD_WR_CP_EN(x)	\
    (((x) >> S_FW_SCSI_CMD_WR_CP_EN) & M_FW_SCSI_CMD_WR_CP_EN)

#define S_FW_SCSI_CMD_WR_CLASS	4
#define M_FW_SCSI_CMD_WR_CLASS	0x3
#define V_FW_SCSI_CMD_WR_CLASS(x)	((x) << S_FW_SCSI_CMD_WR_CLASS)
#define G_FW_SCSI_CMD_WR_CLASS(x)	\
    (((x) >> S_FW_SCSI_CMD_WR_CLASS) & M_FW_SCSI_CMD_WR_CLASS)
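
/*
 * Illustrative sketch only: the unified SCSI WRs share the
 * OPCODE/IMMDLEN/FLOWID/LEN16 layout, so a fw_scsi_cmd_wr header could be
 * assembled as below.  FW_SCSI_CMD_WR is assumed to be the opcode from the
 * fw_wr_opcodes enum earlier in this file; the helper name and the
 * flowid/len16/immdlen values are caller-supplied and hypothetical.
 */
#if 0	/* example only */
static inline void
example_scsi_cmd_hdr(struct fw_scsi_cmd_wr *wr, u_int flowid, u_int len16,
    u_int immdlen)
{
	wr->op_immdlen = htobe32(V_FW_SCSI_CMD_WR_OPCODE(FW_SCSI_CMD_WR) |
	    V_FW_SCSI_CMD_WR_IMMDLEN(immdlen));
	wr->flowid_len16 = htobe32(V_FW_SCSI_CMD_WR_FLOWID(flowid) |
	    V_FW_SCSI_CMD_WR_LEN16(len16));
}
#endif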

struct fw_scsi_abrt_cls_wr {
	__be32	op_immdlen;
	__be32	flowid_len16;
	__be64	cookie;
	__be16	iqid;
	__u8	tmo_val;
	__u8	sub_opcode_to_chk_all_io;
	__u8	r3[4];
	__be64	t_cookie;
};

#define S_FW_SCSI_ABRT_CLS_WR_OPCODE	24
#define M_FW_SCSI_ABRT_CLS_WR_OPCODE	0xff
#define V_FW_SCSI_ABRT_CLS_WR_OPCODE(x)	((x) << S_FW_SCSI_ABRT_CLS_WR_OPCODE)
#define G_FW_SCSI_ABRT_CLS_WR_OPCODE(x)	\
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_OPCODE) & M_FW_SCSI_ABRT_CLS_WR_OPCODE)

#define S_FW_SCSI_ABRT_CLS_WR_IMMDLEN	0
#define M_FW_SCSI_ABRT_CLS_WR_IMMDLEN	0xff
#define V_FW_SCSI_ABRT_CLS_WR_IMMDLEN(x)	\
    ((x) << S_FW_SCSI_ABRT_CLS_WR_IMMDLEN)
#define G_FW_SCSI_ABRT_CLS_WR_IMMDLEN(x)	\
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_IMMDLEN) & M_FW_SCSI_ABRT_CLS_WR_IMMDLEN)

#define S_FW_SCSI_ABRT_CLS_WR_FLOWID	8
#define M_FW_SCSI_ABRT_CLS_WR_FLOWID	0xfffff
#define V_FW_SCSI_ABRT_CLS_WR_FLOWID(x)	((x) << S_FW_SCSI_ABRT_CLS_WR_FLOWID)
#define G_FW_SCSI_ABRT_CLS_WR_FLOWID(x)	\
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_FLOWID) & M_FW_SCSI_ABRT_CLS_WR_FLOWID)

#define S_FW_SCSI_ABRT_CLS_WR_LEN16	0
#define M_FW_SCSI_ABRT_CLS_WR_LEN16	0xff
#define V_FW_SCSI_ABRT_CLS_WR_LEN16(x)	((x) << S_FW_SCSI_ABRT_CLS_WR_LEN16)
#define G_FW_SCSI_ABRT_CLS_WR_LEN16(x)	\
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_LEN16) & M_FW_SCSI_ABRT_CLS_WR_LEN16)

#define S_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE	2
#define M_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE	0x3f
#define V_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x)	\
    ((x) << S_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE)
#define G_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x)	\
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE) & \
     M_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE)

#define S_FW_SCSI_ABRT_CLS_WR_UNSOL	1
#define M_FW_SCSI_ABRT_CLS_WR_UNSOL	0x1
#define V_FW_SCSI_ABRT_CLS_WR_UNSOL(x)	((x) << S_FW_SCSI_ABRT_CLS_WR_UNSOL)
#define G_FW_SCSI_ABRT_CLS_WR_UNSOL(x)	\
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_UNSOL) & M_FW_SCSI_ABRT_CLS_WR_UNSOL)
#define F_FW_SCSI_ABRT_CLS_WR_UNSOL	V_FW_SCSI_ABRT_CLS_WR_UNSOL(1U)

#define S_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO	0
#define M_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO	0x1
#define V_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x)	\
    ((x) << S_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO)
#define G_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x)	\
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO) & \
     M_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO)
#define F_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO	\
    V_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(1U)

struct fw_scsi_tgt_acc_wr {
	__be32	op_immdlen;
	__be32	flowid_len16;
	__be64	cookie;
	__be16	iqid;
	__u8	r3;
	__u8	use_burst_len;
	union fw_scsi_tgt_acc_priv {
		struct fcoe_tgt_acc_priv {
			__u8	ctl_pri;
			__u8	cp_en_class;
			__u8	r4_lo[2];
		} fcoe;
		struct iscsi_tgt_acc_priv {
			__u8	r4[4];
		} iscsi;
	} u;
	__be32	burst_len;
	__be32	rel_off;
	__be64	r5;
	__be32	r6;
	__be32	tot_xfer_len;
};

#define S_FW_SCSI_TGT_ACC_WR_OPCODE	24
#define M_FW_SCSI_TGT_ACC_WR_OPCODE	0xff
#define V_FW_SCSI_TGT_ACC_WR_OPCODE(x)	((x) << S_FW_SCSI_TGT_ACC_WR_OPCODE)
#define G_FW_SCSI_TGT_ACC_WR_OPCODE(x)	\
    (((x) >> S_FW_SCSI_TGT_ACC_WR_OPCODE) & M_FW_SCSI_TGT_ACC_WR_OPCODE)

#define S_FW_SCSI_TGT_ACC_WR_IMMDLEN	0
#define M_FW_SCSI_TGT_ACC_WR_IMMDLEN	0xff
#define V_FW_SCSI_TGT_ACC_WR_IMMDLEN(x)	((x) << S_FW_SCSI_TGT_ACC_WR_IMMDLEN)
#define G_FW_SCSI_TGT_ACC_WR_IMMDLEN(x)	\
    (((x) >> S_FW_SCSI_TGT_ACC_WR_IMMDLEN) & M_FW_SCSI_TGT_ACC_WR_IMMDLEN)

#define S_FW_SCSI_TGT_ACC_WR_FLOWID	8
#define M_FW_SCSI_TGT_ACC_WR_FLOWID	0xfffff
#define V_FW_SCSI_TGT_ACC_WR_FLOWID(x)	((x) << S_FW_SCSI_TGT_ACC_WR_FLOWID)
#define G_FW_SCSI_TGT_ACC_WR_FLOWID(x)	\
    (((x) >> S_FW_SCSI_TGT_ACC_WR_FLOWID) & M_FW_SCSI_TGT_ACC_WR_FLOWID)

#define S_FW_SCSI_TGT_ACC_WR_LEN16	0
#define M_FW_SCSI_TGT_ACC_WR_LEN16	0xff
#define V_FW_SCSI_TGT_ACC_WR_LEN16(x)	((x) << S_FW_SCSI_TGT_ACC_WR_LEN16)
#define G_FW_SCSI_TGT_ACC_WR_LEN16(x)	\
    (((x) >> S_FW_SCSI_TGT_ACC_WR_LEN16) & M_FW_SCSI_TGT_ACC_WR_LEN16)

#define S_FW_SCSI_TGT_ACC_WR_CP_EN	6
#define M_FW_SCSI_TGT_ACC_WR_CP_EN	0x3
#define V_FW_SCSI_TGT_ACC_WR_CP_EN(x)	((x) << S_FW_SCSI_TGT_ACC_WR_CP_EN)
#define G_FW_SCSI_TGT_ACC_WR_CP_EN(x)	\
    (((x) >> S_FW_SCSI_TGT_ACC_WR_CP_EN) & M_FW_SCSI_TGT_ACC_WR_CP_EN)

#define S_FW_SCSI_TGT_ACC_WR_CLASS	4
#define M_FW_SCSI_TGT_ACC_WR_CLASS	0x3
#define V_FW_SCSI_TGT_ACC_WR_CLASS(x)	((x) << S_FW_SCSI_TGT_ACC_WR_CLASS)
#define G_FW_SCSI_TGT_ACC_WR_CLASS(x)	\
    (((x) >> S_FW_SCSI_TGT_ACC_WR_CLASS) & M_FW_SCSI_TGT_ACC_WR_CLASS)

struct fw_scsi_tgt_xmit_wr {
	__be32	op_immdlen;
	__be32	flowid_len16;
	__be64	cookie;
	__be16	iqid;
	__u8	auto_rsp;
	__u8	use_xfer_cnt;
	union fw_scsi_tgt_xmit_priv {
		struct fcoe_tgt_xmit_priv {
			__u8	ctl_pri;
			__u8	cp_en_class;
			__u8	r3_lo[2];
		} fcoe;
		struct iscsi_tgt_xmit_priv {
			__u8	r3[4];
		} iscsi;
	} u;
	__be32	xfer_cnt;
	__be32	r4;
	__be64	r5;
	__be32	r6;
	__be32	tot_xfer_len;
};

#define S_FW_SCSI_TGT_XMIT_WR_OPCODE	24
#define M_FW_SCSI_TGT_XMIT_WR_OPCODE	0xff
#define V_FW_SCSI_TGT_XMIT_WR_OPCODE(x)	((x) << S_FW_SCSI_TGT_XMIT_WR_OPCODE)
#define G_FW_SCSI_TGT_XMIT_WR_OPCODE(x)	\
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_OPCODE) & M_FW_SCSI_TGT_XMIT_WR_OPCODE)

#define S_FW_SCSI_TGT_XMIT_WR_IMMDLEN	0
#define M_FW_SCSI_TGT_XMIT_WR_IMMDLEN	0xff
#define V_FW_SCSI_TGT_XMIT_WR_IMMDLEN(x)	\
    ((x) << S_FW_SCSI_TGT_XMIT_WR_IMMDLEN)
#define G_FW_SCSI_TGT_XMIT_WR_IMMDLEN(x)	\
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_IMMDLEN) & M_FW_SCSI_TGT_XMIT_WR_IMMDLEN)

#define S_FW_SCSI_TGT_XMIT_WR_FLOWID	8
#define M_FW_SCSI_TGT_XMIT_WR_FLOWID	0xfffff
#define V_FW_SCSI_TGT_XMIT_WR_FLOWID(x)	((x) << S_FW_SCSI_TGT_XMIT_WR_FLOWID)
#define G_FW_SCSI_TGT_XMIT_WR_FLOWID(x)	\
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_FLOWID) & M_FW_SCSI_TGT_XMIT_WR_FLOWID)

#define S_FW_SCSI_TGT_XMIT_WR_LEN16	0
#define M_FW_SCSI_TGT_XMIT_WR_LEN16	0xff
#define V_FW_SCSI_TGT_XMIT_WR_LEN16(x)	((x) << S_FW_SCSI_TGT_XMIT_WR_LEN16)
#define G_FW_SCSI_TGT_XMIT_WR_LEN16(x)	\
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_LEN16) & M_FW_SCSI_TGT_XMIT_WR_LEN16)

#define S_FW_SCSI_TGT_XMIT_WR_CP_EN	6
#define M_FW_SCSI_TGT_XMIT_WR_CP_EN	0x3
#define V_FW_SCSI_TGT_XMIT_WR_CP_EN(x)	((x) << S_FW_SCSI_TGT_XMIT_WR_CP_EN)
#define G_FW_SCSI_TGT_XMIT_WR_CP_EN(x)	\
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_CP_EN) & M_FW_SCSI_TGT_XMIT_WR_CP_EN)

#define S_FW_SCSI_TGT_XMIT_WR_CLASS	4
#define M_FW_SCSI_TGT_XMIT_WR_CLASS	0x3
#define V_FW_SCSI_TGT_XMIT_WR_CLASS(x)	((x) << S_FW_SCSI_TGT_XMIT_WR_CLASS)
#define G_FW_SCSI_TGT_XMIT_WR_CLASS(x)	\
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_CLASS) & M_FW_SCSI_TGT_XMIT_WR_CLASS)

struct fw_scsi_tgt_rsp_wr {
	__be32	op_immdlen;
	__be32	flowid_len16;
	__be64	cookie;
	__be16	iqid;
	__u8	r3[2];
	union fw_scsi_tgt_rsp_priv {
		struct fcoe_tgt_rsp_priv {
			__u8	ctl_pri;
			__u8	cp_en_class;
			__u8	r4_lo[2];
		} fcoe;
		struct iscsi_tgt_rsp_priv {
			__u8	r4[4];
		} iscsi;
	} u;
	__u8	r5[8];
};

#define S_FW_SCSI_TGT_RSP_WR_OPCODE	24
#define M_FW_SCSI_TGT_RSP_WR_OPCODE	0xff
#define V_FW_SCSI_TGT_RSP_WR_OPCODE(x)	((x) << S_FW_SCSI_TGT_RSP_WR_OPCODE)
#define G_FW_SCSI_TGT_RSP_WR_OPCODE(x)	\
    (((x) >> S_FW_SCSI_TGT_RSP_WR_OPCODE) & M_FW_SCSI_TGT_RSP_WR_OPCODE)

#define S_FW_SCSI_TGT_RSP_WR_IMMDLEN	0
#define M_FW_SCSI_TGT_RSP_WR_IMMDLEN	0xff
#define V_FW_SCSI_TGT_RSP_WR_IMMDLEN(x)	((x) << S_FW_SCSI_TGT_RSP_WR_IMMDLEN)
#define G_FW_SCSI_TGT_RSP_WR_IMMDLEN(x)	\
    (((x) >> S_FW_SCSI_TGT_RSP_WR_IMMDLEN) & M_FW_SCSI_TGT_RSP_WR_IMMDLEN)

#define S_FW_SCSI_TGT_RSP_WR_FLOWID	8
#define M_FW_SCSI_TGT_RSP_WR_FLOWID	0xfffff
#define V_FW_SCSI_TGT_RSP_WR_FLOWID(x)	((x) << S_FW_SCSI_TGT_RSP_WR_FLOWID)
#define G_FW_SCSI_TGT_RSP_WR_FLOWID(x)	\
    (((x) >> S_FW_SCSI_TGT_RSP_WR_FLOWID) & M_FW_SCSI_TGT_RSP_WR_FLOWID)

#define S_FW_SCSI_TGT_RSP_WR_LEN16	0
#define M_FW_SCSI_TGT_RSP_WR_LEN16	0xff
#define V_FW_SCSI_TGT_RSP_WR_LEN16(x)	((x) << S_FW_SCSI_TGT_RSP_WR_LEN16)
#define G_FW_SCSI_TGT_RSP_WR_LEN16(x)	\
    (((x) >> S_FW_SCSI_TGT_RSP_WR_LEN16) & M_FW_SCSI_TGT_RSP_WR_LEN16)

#define S_FW_SCSI_TGT_RSP_WR_CP_EN	6
#define M_FW_SCSI_TGT_RSP_WR_CP_EN	0x3
#define V_FW_SCSI_TGT_RSP_WR_CP_EN(x)	((x) << S_FW_SCSI_TGT_RSP_WR_CP_EN)
#define G_FW_SCSI_TGT_RSP_WR_CP_EN(x)	\
    (((x) >> S_FW_SCSI_TGT_RSP_WR_CP_EN) & M_FW_SCSI_TGT_RSP_WR_CP_EN)
V_FW_SCSI_TGT_RSP_WR_CP_EN(x) ((x) << S_FW_SCSI_TGT_RSP_WR_CP_EN) #define G_FW_SCSI_TGT_RSP_WR_CP_EN(x) \ (((x) >> S_FW_SCSI_TGT_RSP_WR_CP_EN) & M_FW_SCSI_TGT_RSP_WR_CP_EN) #define S_FW_SCSI_TGT_RSP_WR_CLASS 4 #define M_FW_SCSI_TGT_RSP_WR_CLASS 0x3 #define V_FW_SCSI_TGT_RSP_WR_CLASS(x) ((x) << S_FW_SCSI_TGT_RSP_WR_CLASS) #define G_FW_SCSI_TGT_RSP_WR_CLASS(x) \ (((x) >> S_FW_SCSI_TGT_RSP_WR_CLASS) & M_FW_SCSI_TGT_RSP_WR_CLASS) struct fw_pofcoe_tcb_wr { __be32 op_compl; __be32 equiq_to_len16; __be32 r4; __be32 xfer_len; __be32 tid_to_port; __be16 x_id; __be16 vlan_id; __be64 cookie; __be32 s_id; __be32 d_id; __be32 tag; __be16 r6; __be16 iqid; }; #define S_FW_POFCOE_TCB_WR_TID 12 #define M_FW_POFCOE_TCB_WR_TID 0xfffff #define V_FW_POFCOE_TCB_WR_TID(x) ((x) << S_FW_POFCOE_TCB_WR_TID) #define G_FW_POFCOE_TCB_WR_TID(x) \ (((x) >> S_FW_POFCOE_TCB_WR_TID) & M_FW_POFCOE_TCB_WR_TID) #define S_FW_POFCOE_TCB_WR_ALLOC 4 #define M_FW_POFCOE_TCB_WR_ALLOC 0x1 #define V_FW_POFCOE_TCB_WR_ALLOC(x) ((x) << S_FW_POFCOE_TCB_WR_ALLOC) #define G_FW_POFCOE_TCB_WR_ALLOC(x) \ (((x) >> S_FW_POFCOE_TCB_WR_ALLOC) & M_FW_POFCOE_TCB_WR_ALLOC) #define F_FW_POFCOE_TCB_WR_ALLOC V_FW_POFCOE_TCB_WR_ALLOC(1U) #define S_FW_POFCOE_TCB_WR_FREE 3 #define M_FW_POFCOE_TCB_WR_FREE 0x1 #define V_FW_POFCOE_TCB_WR_FREE(x) ((x) << S_FW_POFCOE_TCB_WR_FREE) #define G_FW_POFCOE_TCB_WR_FREE(x) \ (((x) >> S_FW_POFCOE_TCB_WR_FREE) & M_FW_POFCOE_TCB_WR_FREE) #define F_FW_POFCOE_TCB_WR_FREE V_FW_POFCOE_TCB_WR_FREE(1U) #define S_FW_POFCOE_TCB_WR_PORT 0 #define M_FW_POFCOE_TCB_WR_PORT 0x7 #define V_FW_POFCOE_TCB_WR_PORT(x) ((x) << S_FW_POFCOE_TCB_WR_PORT) #define G_FW_POFCOE_TCB_WR_PORT(x) \ (((x) >> S_FW_POFCOE_TCB_WR_PORT) & M_FW_POFCOE_TCB_WR_PORT) struct fw_pofcoe_ulptx_wr { __be32 op_pkd; __be32 equiq_to_len16; __u64 cookie; }; /******************************************************************* * T10 DIF related definition *******************************************************************/ struct fw_tx_pi_header { __be16 op_to_inline; __u8 pi_interval_tag_type; __u8 num_pi; __be32 pi_start4_pi_end4; __u8 tag_gen_enabled_pkd; __u8 num_pi_dsg; __be16 app_tag; __be32 ref_tag; }; #define S_FW_TX_PI_HEADER_OP 8 #define M_FW_TX_PI_HEADER_OP 0xff #define V_FW_TX_PI_HEADER_OP(x) ((x) << S_FW_TX_PI_HEADER_OP) #define G_FW_TX_PI_HEADER_OP(x) \ (((x) >> S_FW_TX_PI_HEADER_OP) & M_FW_TX_PI_HEADER_OP) #define S_FW_TX_PI_HEADER_ULPTXMORE 7 #define M_FW_TX_PI_HEADER_ULPTXMORE 0x1 #define V_FW_TX_PI_HEADER_ULPTXMORE(x) ((x) << S_FW_TX_PI_HEADER_ULPTXMORE) #define G_FW_TX_PI_HEADER_ULPTXMORE(x) \ (((x) >> S_FW_TX_PI_HEADER_ULPTXMORE) & M_FW_TX_PI_HEADER_ULPTXMORE) #define F_FW_TX_PI_HEADER_ULPTXMORE V_FW_TX_PI_HEADER_ULPTXMORE(1U) #define S_FW_TX_PI_HEADER_PI_CONTROL 4 #define M_FW_TX_PI_HEADER_PI_CONTROL 0x7 #define V_FW_TX_PI_HEADER_PI_CONTROL(x) ((x) << S_FW_TX_PI_HEADER_PI_CONTROL) #define G_FW_TX_PI_HEADER_PI_CONTROL(x) \ (((x) >> S_FW_TX_PI_HEADER_PI_CONTROL) & M_FW_TX_PI_HEADER_PI_CONTROL) #define S_FW_TX_PI_HEADER_GUARD_TYPE 2 #define M_FW_TX_PI_HEADER_GUARD_TYPE 0x1 #define V_FW_TX_PI_HEADER_GUARD_TYPE(x) ((x) << S_FW_TX_PI_HEADER_GUARD_TYPE) #define G_FW_TX_PI_HEADER_GUARD_TYPE(x) \ (((x) >> S_FW_TX_PI_HEADER_GUARD_TYPE) & M_FW_TX_PI_HEADER_GUARD_TYPE) #define F_FW_TX_PI_HEADER_GUARD_TYPE V_FW_TX_PI_HEADER_GUARD_TYPE(1U) #define S_FW_TX_PI_HEADER_VALIDATE 1 #define M_FW_TX_PI_HEADER_VALIDATE 0x1 #define V_FW_TX_PI_HEADER_VALIDATE(x) ((x) << S_FW_TX_PI_HEADER_VALIDATE) #define G_FW_TX_PI_HEADER_VALIDATE(x) \ (((x) >> S_FW_TX_PI_HEADER_VALIDATE) & 
M_FW_TX_PI_HEADER_VALIDATE) #define F_FW_TX_PI_HEADER_VALIDATE V_FW_TX_PI_HEADER_VALIDATE(1U) #define S_FW_TX_PI_HEADER_INLINE 0 #define M_FW_TX_PI_HEADER_INLINE 0x1 #define V_FW_TX_PI_HEADER_INLINE(x) ((x) << S_FW_TX_PI_HEADER_INLINE) #define G_FW_TX_PI_HEADER_INLINE(x) \ (((x) >> S_FW_TX_PI_HEADER_INLINE) & M_FW_TX_PI_HEADER_INLINE) #define F_FW_TX_PI_HEADER_INLINE V_FW_TX_PI_HEADER_INLINE(1U) #define S_FW_TX_PI_HEADER_PI_INTERVAL 7 #define M_FW_TX_PI_HEADER_PI_INTERVAL 0x1 #define V_FW_TX_PI_HEADER_PI_INTERVAL(x) \ ((x) << S_FW_TX_PI_HEADER_PI_INTERVAL) #define G_FW_TX_PI_HEADER_PI_INTERVAL(x) \ (((x) >> S_FW_TX_PI_HEADER_PI_INTERVAL) & M_FW_TX_PI_HEADER_PI_INTERVAL) #define F_FW_TX_PI_HEADER_PI_INTERVAL V_FW_TX_PI_HEADER_PI_INTERVAL(1U) #define S_FW_TX_PI_HEADER_TAG_TYPE 5 #define M_FW_TX_PI_HEADER_TAG_TYPE 0x3 #define V_FW_TX_PI_HEADER_TAG_TYPE(x) ((x) << S_FW_TX_PI_HEADER_TAG_TYPE) #define G_FW_TX_PI_HEADER_TAG_TYPE(x) \ (((x) >> S_FW_TX_PI_HEADER_TAG_TYPE) & M_FW_TX_PI_HEADER_TAG_TYPE) #define S_FW_TX_PI_HEADER_PI_START4 22 #define M_FW_TX_PI_HEADER_PI_START4 0x3ff #define V_FW_TX_PI_HEADER_PI_START4(x) ((x) << S_FW_TX_PI_HEADER_PI_START4) #define G_FW_TX_PI_HEADER_PI_START4(x) \ (((x) >> S_FW_TX_PI_HEADER_PI_START4) & M_FW_TX_PI_HEADER_PI_START4) #define S_FW_TX_PI_HEADER_PI_END4 0 #define M_FW_TX_PI_HEADER_PI_END4 0x3fffff #define V_FW_TX_PI_HEADER_PI_END4(x) ((x) << S_FW_TX_PI_HEADER_PI_END4) #define G_FW_TX_PI_HEADER_PI_END4(x) \ (((x) >> S_FW_TX_PI_HEADER_PI_END4) & M_FW_TX_PI_HEADER_PI_END4) #define S_FW_TX_PI_HEADER_TAG_GEN_ENABLED 6 #define M_FW_TX_PI_HEADER_TAG_GEN_ENABLED 0x3 #define V_FW_TX_PI_HEADER_TAG_GEN_ENABLED(x) \ ((x) << S_FW_TX_PI_HEADER_TAG_GEN_ENABLED) #define G_FW_TX_PI_HEADER_TAG_GEN_ENABLED(x) \ (((x) >> S_FW_TX_PI_HEADER_TAG_GEN_ENABLED) & \ M_FW_TX_PI_HEADER_TAG_GEN_ENABLED) enum fw_pi_error_type { FW_PI_ERROR_GUARD_CHECK_FAILED = 0, }; struct fw_pi_error { __be32 err_type_pkd; __be32 flowid_len16; __be16 r2; __be16 app_tag; __be32 ref_tag; __be32 pisc[4]; }; #define S_FW_PI_ERROR_ERR_TYPE 24 #define M_FW_PI_ERROR_ERR_TYPE 0xff #define V_FW_PI_ERROR_ERR_TYPE(x) ((x) << S_FW_PI_ERROR_ERR_TYPE) #define G_FW_PI_ERROR_ERR_TYPE(x) \ (((x) >> S_FW_PI_ERROR_ERR_TYPE) & M_FW_PI_ERROR_ERR_TYPE) struct fw_tlstx_data_wr { __be32 op_to_immdlen; __be32 flowid_len16; __be32 plen; __be32 lsodisable_to_flags; __be32 r5; __be32 ctxloc_to_exp; __be16 mfs; __be16 adjustedplen_pkd; __be16 expinplenmax_pkd; __u8 pdusinplenmax_pkd; __u8 r10; }; #define S_FW_TLSTX_DATA_WR_OPCODE 24 #define M_FW_TLSTX_DATA_WR_OPCODE 0xff #define V_FW_TLSTX_DATA_WR_OPCODE(x) ((x) << S_FW_TLSTX_DATA_WR_OPCODE) #define G_FW_TLSTX_DATA_WR_OPCODE(x) \ (((x) >> S_FW_TLSTX_DATA_WR_OPCODE) & M_FW_TLSTX_DATA_WR_OPCODE) #define S_FW_TLSTX_DATA_WR_COMPL 21 #define M_FW_TLSTX_DATA_WR_COMPL 0x1 #define V_FW_TLSTX_DATA_WR_COMPL(x) ((x) << S_FW_TLSTX_DATA_WR_COMPL) #define G_FW_TLSTX_DATA_WR_COMPL(x) \ (((x) >> S_FW_TLSTX_DATA_WR_COMPL) & M_FW_TLSTX_DATA_WR_COMPL) #define F_FW_TLSTX_DATA_WR_COMPL V_FW_TLSTX_DATA_WR_COMPL(1U) #define S_FW_TLSTX_DATA_WR_IMMDLEN 0 #define M_FW_TLSTX_DATA_WR_IMMDLEN 0xff #define V_FW_TLSTX_DATA_WR_IMMDLEN(x) ((x) << S_FW_TLSTX_DATA_WR_IMMDLEN) #define G_FW_TLSTX_DATA_WR_IMMDLEN(x) \ (((x) >> S_FW_TLSTX_DATA_WR_IMMDLEN) & M_FW_TLSTX_DATA_WR_IMMDLEN) #define S_FW_TLSTX_DATA_WR_FLOWID 8 #define M_FW_TLSTX_DATA_WR_FLOWID 0xfffff #define V_FW_TLSTX_DATA_WR_FLOWID(x) ((x) << S_FW_TLSTX_DATA_WR_FLOWID) #define G_FW_TLSTX_DATA_WR_FLOWID(x) \ (((x) >> S_FW_TLSTX_DATA_WR_FLOWID) & 
M_FW_TLSTX_DATA_WR_FLOWID) #define S_FW_TLSTX_DATA_WR_LEN16 0 #define M_FW_TLSTX_DATA_WR_LEN16 0xff #define V_FW_TLSTX_DATA_WR_LEN16(x) ((x) << S_FW_TLSTX_DATA_WR_LEN16) #define G_FW_TLSTX_DATA_WR_LEN16(x) \ (((x) >> S_FW_TLSTX_DATA_WR_LEN16) & M_FW_TLSTX_DATA_WR_LEN16) #define S_FW_TLSTX_DATA_WR_LSODISABLE 31 #define M_FW_TLSTX_DATA_WR_LSODISABLE 0x1 #define V_FW_TLSTX_DATA_WR_LSODISABLE(x) \ ((x) << S_FW_TLSTX_DATA_WR_LSODISABLE) #define G_FW_TLSTX_DATA_WR_LSODISABLE(x) \ (((x) >> S_FW_TLSTX_DATA_WR_LSODISABLE) & M_FW_TLSTX_DATA_WR_LSODISABLE) #define F_FW_TLSTX_DATA_WR_LSODISABLE V_FW_TLSTX_DATA_WR_LSODISABLE(1U) #define S_FW_TLSTX_DATA_WR_ALIGNPLD 30 #define M_FW_TLSTX_DATA_WR_ALIGNPLD 0x1 #define V_FW_TLSTX_DATA_WR_ALIGNPLD(x) ((x) << S_FW_TLSTX_DATA_WR_ALIGNPLD) #define G_FW_TLSTX_DATA_WR_ALIGNPLD(x) \ (((x) >> S_FW_TLSTX_DATA_WR_ALIGNPLD) & M_FW_TLSTX_DATA_WR_ALIGNPLD) #define F_FW_TLSTX_DATA_WR_ALIGNPLD V_FW_TLSTX_DATA_WR_ALIGNPLD(1U) #define S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE 29 #define M_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE 0x1 #define V_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(x) \ ((x) << S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE) #define G_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(x) \ (((x) >> S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE) & \ M_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE) #define F_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE V_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(1U) #define S_FW_TLSTX_DATA_WR_FLAGS 0 #define M_FW_TLSTX_DATA_WR_FLAGS 0xfffffff #define V_FW_TLSTX_DATA_WR_FLAGS(x) ((x) << S_FW_TLSTX_DATA_WR_FLAGS) #define G_FW_TLSTX_DATA_WR_FLAGS(x) \ (((x) >> S_FW_TLSTX_DATA_WR_FLAGS) & M_FW_TLSTX_DATA_WR_FLAGS) #define S_FW_TLSTX_DATA_WR_CTXLOC 30 #define M_FW_TLSTX_DATA_WR_CTXLOC 0x3 #define V_FW_TLSTX_DATA_WR_CTXLOC(x) ((x) << S_FW_TLSTX_DATA_WR_CTXLOC) #define G_FW_TLSTX_DATA_WR_CTXLOC(x) \ (((x) >> S_FW_TLSTX_DATA_WR_CTXLOC) & M_FW_TLSTX_DATA_WR_CTXLOC) #define S_FW_TLSTX_DATA_WR_IVDSGL 29 #define M_FW_TLSTX_DATA_WR_IVDSGL 0x1 #define V_FW_TLSTX_DATA_WR_IVDSGL(x) ((x) << S_FW_TLSTX_DATA_WR_IVDSGL) #define G_FW_TLSTX_DATA_WR_IVDSGL(x) \ (((x) >> S_FW_TLSTX_DATA_WR_IVDSGL) & M_FW_TLSTX_DATA_WR_IVDSGL) #define F_FW_TLSTX_DATA_WR_IVDSGL V_FW_TLSTX_DATA_WR_IVDSGL(1U) #define S_FW_TLSTX_DATA_WR_KEYSIZE 24 #define M_FW_TLSTX_DATA_WR_KEYSIZE 0x1f #define V_FW_TLSTX_DATA_WR_KEYSIZE(x) ((x) << S_FW_TLSTX_DATA_WR_KEYSIZE) #define G_FW_TLSTX_DATA_WR_KEYSIZE(x) \ (((x) >> S_FW_TLSTX_DATA_WR_KEYSIZE) & M_FW_TLSTX_DATA_WR_KEYSIZE) #define S_FW_TLSTX_DATA_WR_NUMIVS 14 #define M_FW_TLSTX_DATA_WR_NUMIVS 0xff #define V_FW_TLSTX_DATA_WR_NUMIVS(x) ((x) << S_FW_TLSTX_DATA_WR_NUMIVS) #define G_FW_TLSTX_DATA_WR_NUMIVS(x) \ (((x) >> S_FW_TLSTX_DATA_WR_NUMIVS) & M_FW_TLSTX_DATA_WR_NUMIVS) #define S_FW_TLSTX_DATA_WR_EXP 0 #define M_FW_TLSTX_DATA_WR_EXP 0x3fff #define V_FW_TLSTX_DATA_WR_EXP(x) ((x) << S_FW_TLSTX_DATA_WR_EXP) #define G_FW_TLSTX_DATA_WR_EXP(x) \ (((x) >> S_FW_TLSTX_DATA_WR_EXP) & M_FW_TLSTX_DATA_WR_EXP) #define S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN 1 #define M_FW_TLSTX_DATA_WR_ADJUSTEDPLEN 0x7fff #define V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(x) \ ((x) << S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN) #define G_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(x) \ (((x) >> S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN) & \ M_FW_TLSTX_DATA_WR_ADJUSTEDPLEN) #define S_FW_TLSTX_DATA_WR_EXPINPLENMAX 4 #define M_FW_TLSTX_DATA_WR_EXPINPLENMAX 0xfff #define V_FW_TLSTX_DATA_WR_EXPINPLENMAX(x) \ ((x) << S_FW_TLSTX_DATA_WR_EXPINPLENMAX) #define G_FW_TLSTX_DATA_WR_EXPINPLENMAX(x) \ (((x) >> S_FW_TLSTX_DATA_WR_EXPINPLENMAX) & \ M_FW_TLSTX_DATA_WR_EXPINPLENMAX) #define S_FW_TLSTX_DATA_WR_PDUSINPLENMAX 2 #define 
M_FW_TLSTX_DATA_WR_PDUSINPLENMAX 0x3f #define V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(x) \ ((x) << S_FW_TLSTX_DATA_WR_PDUSINPLENMAX) #define G_FW_TLSTX_DATA_WR_PDUSINPLENMAX(x) \ (((x) >> S_FW_TLSTX_DATA_WR_PDUSINPLENMAX) & \ M_FW_TLSTX_DATA_WR_PDUSINPLENMAX) struct fw_crypto_lookaside_wr { __be32 op_to_cctx_size; __be32 len16_pkd; __be32 session_id; __be32 rx_chid_to_rx_q_id; __be32 key_addr; __be32 pld_size_hash_size; __be64 cookie; }; #define S_FW_CRYPTO_LOOKASIDE_WR_OPCODE 24 #define M_FW_CRYPTO_LOOKASIDE_WR_OPCODE 0xff #define V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_OPCODE) #define G_FW_CRYPTO_LOOKASIDE_WR_OPCODE(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_OPCODE) & \ M_FW_CRYPTO_LOOKASIDE_WR_OPCODE) #define S_FW_CRYPTO_LOOKASIDE_WR_COMPL 23 #define M_FW_CRYPTO_LOOKASIDE_WR_COMPL 0x1 #define V_FW_CRYPTO_LOOKASIDE_WR_COMPL(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_COMPL) #define G_FW_CRYPTO_LOOKASIDE_WR_COMPL(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_COMPL) & \ M_FW_CRYPTO_LOOKASIDE_WR_COMPL) #define F_FW_CRYPTO_LOOKASIDE_WR_COMPL V_FW_CRYPTO_LOOKASIDE_WR_COMPL(1U) #define S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN 15 #define M_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN 0xff #define V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN) #define G_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN) & \ M_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN) #define S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC 5 #define M_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC) #define G_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC) & \ M_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC) #define S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE 0 #define M_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE 0x1f #define V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE) #define G_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE) & \ M_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE) #define S_FW_CRYPTO_LOOKASIDE_WR_LEN16 0 #define M_FW_CRYPTO_LOOKASIDE_WR_LEN16 0xff #define V_FW_CRYPTO_LOOKASIDE_WR_LEN16(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_LEN16) #define G_FW_CRYPTO_LOOKASIDE_WR_LEN16(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_LEN16) & \ M_FW_CRYPTO_LOOKASIDE_WR_LEN16) #define S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID 29 #define M_FW_CRYPTO_LOOKASIDE_WR_RX_CHID 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID) #define G_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID) & \ M_FW_CRYPTO_LOOKASIDE_WR_RX_CHID) #define S_FW_CRYPTO_LOOKASIDE_WR_LCB 27 #define M_FW_CRYPTO_LOOKASIDE_WR_LCB 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_LCB(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_LCB) #define G_FW_CRYPTO_LOOKASIDE_WR_LCB(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_LCB) & M_FW_CRYPTO_LOOKASIDE_WR_LCB) #define S_FW_CRYPTO_LOOKASIDE_WR_PHASH 25 #define M_FW_CRYPTO_LOOKASIDE_WR_PHASH 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_PHASH(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_PHASH) #define G_FW_CRYPTO_LOOKASIDE_WR_PHASH(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_PHASH) & \ M_FW_CRYPTO_LOOKASIDE_WR_PHASH) #define S_FW_CRYPTO_LOOKASIDE_WR_IV 23 #define M_FW_CRYPTO_LOOKASIDE_WR_IV 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_IV(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_IV) #define G_FW_CRYPTO_LOOKASIDE_WR_IV(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_IV) & M_FW_CRYPTO_LOOKASIDE_WR_IV) #define S_FW_CRYPTO_LOOKASIDE_WR_FQIDX 15 #define M_FW_CRYPTO_LOOKASIDE_WR_FQIDX 0xff #define 
V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(x) \
	((x) << S_FW_CRYPTO_LOOKASIDE_WR_FQIDX)
#define G_FW_CRYPTO_LOOKASIDE_WR_FQIDX(x) \
	(((x) >> S_FW_CRYPTO_LOOKASIDE_WR_FQIDX) & \
	 M_FW_CRYPTO_LOOKASIDE_WR_FQIDX)

#define S_FW_CRYPTO_LOOKASIDE_WR_TX_CH		10
#define M_FW_CRYPTO_LOOKASIDE_WR_TX_CH		0x3
#define V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(x) \
	((x) << S_FW_CRYPTO_LOOKASIDE_WR_TX_CH)
#define G_FW_CRYPTO_LOOKASIDE_WR_TX_CH(x) \
	(((x) >> S_FW_CRYPTO_LOOKASIDE_WR_TX_CH) & \
	 M_FW_CRYPTO_LOOKASIDE_WR_TX_CH)

#define S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID	0
#define M_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID	0x3ff
#define V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(x) \
	((x) << S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID)
#define G_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(x) \
	(((x) >> S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID) & \
	 M_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID)

#define S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE	24
#define M_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE	0xff
#define V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(x) \
	((x) << S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE)
#define G_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(x) \
	(((x) >> S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE) & \
	 M_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE)

#define S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE	17
#define M_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE	0x7f
#define V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(x) \
	((x) << S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE)
#define G_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(x) \
	(((x) >> S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE) & \
	 M_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE)

/******************************************************************************
 *   C O M M A N D s
 *********************/

/*
 * The maximum length of time, in milliseconds, that we expect any firmware
 * command to take to execute and return a reply to the host.  The RESET
 * and INITIALIZE commands can take a fair amount of time to execute but
 * most execute in far less time than this maximum.  This constant is used
 * by host software to determine how long to wait for a firmware command
 * reply before declaring the firmware as dead/unreachable ...
 */
#define FW_CMD_MAX_TIMEOUT	10000

/*
 * If a host driver does a HELLO and discovers that there's already a MASTER
 * selected, we may have to wait for that MASTER to finish issuing RESET,
 * configuration and INITIALIZE commands.  Also, there's a possibility that
 * our own HELLO may get lost if it happens right as the MASTER is issuing a
 * RESET command, so we need to be willing to make a few retries of our HELLO.
 */
#define FW_CMD_HELLO_TIMEOUT	(3 * FW_CMD_MAX_TIMEOUT)
#define FW_CMD_HELLO_RETRIES	3
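/*
 * Illustrative sketch only (not part of the firmware interface): a host
 * driver would typically bound its HELLO handshake with the constants
 * above, allowing each attempt up to FW_CMD_HELLO_TIMEOUT (30 s) and
 * retrying up to FW_CMD_HELLO_RETRIES times, for a worst case of
 * 3 * 30 s = 90 s before giving up.  issue_hello() is a hypothetical
 * stand-in for the driver's real mailbox routine, as is the example_*
 * function name.
 */
static inline int
example_fw_hello_with_retries(int (*issue_hello)(void *arg, int timeout_ms),
    void *arg)
{
	int i, ret = -1;

	for (i = 0; i < FW_CMD_HELLO_RETRIES; i++) {
		/* Each attempt may wait up to FW_CMD_HELLO_TIMEOUT ms. */
		ret = issue_hello(arg, FW_CMD_HELLO_TIMEOUT);
		if (ret == 0)
			break;	/* firmware replied */
	}
	return (ret);
}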
enum fw_cmd_opcodes {
	FW_LDST_CMD            = 0x01,
	FW_RESET_CMD           = 0x03,
	FW_HELLO_CMD           = 0x04,
	FW_BYE_CMD             = 0x05,
	FW_INITIALIZE_CMD      = 0x06,
	FW_CAPS_CONFIG_CMD     = 0x07,
	FW_PARAMS_CMD          = 0x08,
	FW_PFVF_CMD            = 0x09,
	FW_IQ_CMD              = 0x10,
	FW_EQ_MNGT_CMD         = 0x11,
	FW_EQ_ETH_CMD          = 0x12,
	FW_EQ_CTRL_CMD         = 0x13,
	FW_EQ_OFLD_CMD         = 0x21,
	FW_VI_CMD              = 0x14,
	FW_VI_MAC_CMD          = 0x15,
	FW_VI_RXMODE_CMD       = 0x16,
	FW_VI_ENABLE_CMD       = 0x17,
	FW_VI_STATS_CMD        = 0x1a,
	FW_ACL_MAC_CMD         = 0x18,
	FW_ACL_VLAN_CMD        = 0x19,
	FW_PORT_CMD            = 0x1b,
	FW_PORT_STATS_CMD      = 0x1c,
	FW_PORT_LB_STATS_CMD   = 0x1d,
	FW_PORT_TRACE_CMD      = 0x1e,
	FW_PORT_TRACE_MMAP_CMD = 0x1f,
	FW_RSS_IND_TBL_CMD     = 0x20,
	FW_RSS_GLB_CONFIG_CMD  = 0x22,
	FW_RSS_VI_CONFIG_CMD   = 0x23,
	FW_SCHED_CMD           = 0x24,
	FW_DEVLOG_CMD          = 0x25,
	FW_WATCHDOG_CMD        = 0x27,
	FW_CLIP_CMD            = 0x28,
	FW_CHNET_IFACE_CMD     = 0x26,
	FW_FCOE_RES_INFO_CMD   = 0x31,
	FW_FCOE_LINK_CMD       = 0x32,
	FW_FCOE_VNP_CMD        = 0x33,
	FW_FCOE_SPARAMS_CMD    = 0x35,
	FW_FCOE_STATS_CMD      = 0x37,
	FW_FCOE_FCF_CMD        = 0x38,
	FW_DCB_IEEE_CMD        = 0x3a,
	FW_DIAG_CMD            = 0x3d,
	FW_PTP_CMD             = 0x3e,
	FW_HMA_CMD             = 0x3f,
	FW_LASTC2E_CMD         = 0x40,
	FW_ERROR_CMD           = 0x80,
	FW_DEBUG_CMD           = 0x81,
};

enum fw_cmd_cap {
	FW_CMD_CAP_PF          = 0x01,
	FW_CMD_CAP_DMAQ        = 0x02,
	FW_CMD_CAP_PORT        = 0x04,
	FW_CMD_CAP_PORTPROMISC = 0x08,
	FW_CMD_CAP_PORTSTATS   = 0x10,
	FW_CMD_CAP_VF          = 0x80,
};

/*
 * Generic command header flit0
 */
struct fw_cmd_hdr {
	__be32 hi;
	__be32 lo;
};

#define S_FW_CMD_OP		24
#define M_FW_CMD_OP		0xff
#define V_FW_CMD_OP(x)		((x) << S_FW_CMD_OP)
#define G_FW_CMD_OP(x)		(((x) >> S_FW_CMD_OP) & M_FW_CMD_OP)

#define S_FW_CMD_REQUEST	23
#define M_FW_CMD_REQUEST	0x1
#define V_FW_CMD_REQUEST(x)	((x) << S_FW_CMD_REQUEST)
#define G_FW_CMD_REQUEST(x)	(((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST)
#define F_FW_CMD_REQUEST	V_FW_CMD_REQUEST(1U)

#define S_FW_CMD_READ		22
#define M_FW_CMD_READ		0x1
#define V_FW_CMD_READ(x)	((x) << S_FW_CMD_READ)
#define G_FW_CMD_READ(x)	(((x) >> S_FW_CMD_READ) & M_FW_CMD_READ)
#define F_FW_CMD_READ		V_FW_CMD_READ(1U)

#define S_FW_CMD_WRITE		21
#define M_FW_CMD_WRITE		0x1
#define V_FW_CMD_WRITE(x)	((x) << S_FW_CMD_WRITE)
#define G_FW_CMD_WRITE(x)	(((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE)
#define F_FW_CMD_WRITE		V_FW_CMD_WRITE(1U)

#define S_FW_CMD_EXEC		20
#define M_FW_CMD_EXEC		0x1
#define V_FW_CMD_EXEC(x)	((x) << S_FW_CMD_EXEC)
#define G_FW_CMD_EXEC(x)	(((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC)
#define F_FW_CMD_EXEC		V_FW_CMD_EXEC(1U)

#define S_FW_CMD_RAMASK		20
#define M_FW_CMD_RAMASK		0xf
#define V_FW_CMD_RAMASK(x)	((x) << S_FW_CMD_RAMASK)
#define G_FW_CMD_RAMASK(x)	(((x) >> S_FW_CMD_RAMASK) & M_FW_CMD_RAMASK)

#define S_FW_CMD_RETVAL		8
#define M_FW_CMD_RETVAL		0xff
#define V_FW_CMD_RETVAL(x)	((x) << S_FW_CMD_RETVAL)
#define G_FW_CMD_RETVAL(x)	(((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL)

#define S_FW_CMD_LEN16		0
#define M_FW_CMD_LEN16		0xff
#define V_FW_CMD_LEN16(x)	((x) << S_FW_CMD_LEN16)
#define G_FW_CMD_LEN16(x)	(((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16)

#define FW_LEN16(fw_struct)	V_FW_CMD_LEN16(sizeof(fw_struct) / 16)

/*
 * address spaces
 */
enum fw_ldst_addrspc {
	FW_LDST_ADDRSPC_FIRMWARE  = 0x0001,
	FW_LDST_ADDRSPC_SGE_EGRC  = 0x0008,
	FW_LDST_ADDRSPC_SGE_INGC  = 0x0009,
	FW_LDST_ADDRSPC_SGE_FLMC  = 0x000a,
	FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
	FW_LDST_ADDRSPC_TP_PIO    = 0x0010,
	FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
	FW_LDST_ADDRSPC_TP_MIB    = 0x0012,
	FW_LDST_ADDRSPC_MDIO      = 0x0018,
	FW_LDST_ADDRSPC_MPS       = 0x0020,
	FW_LDST_ADDRSPC_FUNC      = 0x0028,
	FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
	FW_LDST_ADDRSPC_FUNC_I2C  = 0x002A, /* legacy */
FW_LDST_ADDRSPC_LE = 0x0030, FW_LDST_ADDRSPC_I2C = 0x0038, FW_LDST_ADDRSPC_PCIE_CFGS = 0x0040, FW_LDST_ADDRSPC_PCIE_DBG = 0x0041, FW_LDST_ADDRSPC_PCIE_PHY = 0x0042, FW_LDST_ADDRSPC_CIM_Q = 0x0048, }; /* * MDIO VSC8634 register access control field */ enum fw_ldst_mdio_vsc8634_aid { FW_LDST_MDIO_VS_STANDARD, FW_LDST_MDIO_VS_EXTENDED, FW_LDST_MDIO_VS_GPIO }; enum fw_ldst_mps_fid { FW_LDST_MPS_ATRB, FW_LDST_MPS_RPLC }; enum fw_ldst_func_access_ctl { FW_LDST_FUNC_ACC_CTL_VIID, FW_LDST_FUNC_ACC_CTL_FID }; enum fw_ldst_func_mod_index { FW_LDST_FUNC_MPS }; struct fw_ldst_cmd { __be32 op_to_addrspace; __be32 cycles_to_len16; union fw_ldst { struct fw_ldst_addrval { __be32 addr; __be32 val; } addrval; struct fw_ldst_idctxt { __be32 physid; __be32 msg_ctxtflush; __be32 ctxt_data7; __be32 ctxt_data6; __be32 ctxt_data5; __be32 ctxt_data4; __be32 ctxt_data3; __be32 ctxt_data2; __be32 ctxt_data1; __be32 ctxt_data0; } idctxt; struct fw_ldst_mdio { __be16 paddr_mmd; __be16 raddr; __be16 vctl; __be16 rval; } mdio; struct fw_ldst_cim_rq { __u8 req_first64[8]; __u8 req_second64[8]; __u8 resp_first64[8]; __u8 resp_second64[8]; __be32 r3[2]; } cim_rq; union fw_ldst_mps { struct fw_ldst_mps_rplc { __be16 fid_idx; __be16 rplcpf_pkd; __be32 rplc255_224; __be32 rplc223_192; __be32 rplc191_160; __be32 rplc159_128; __be32 rplc127_96; __be32 rplc95_64; __be32 rplc63_32; __be32 rplc31_0; } rplc; struct fw_ldst_mps_atrb { __be16 fid_mpsid; __be16 r2[3]; __be32 r3[2]; __be32 r4; __be32 atrb; __be16 vlan[16]; } atrb; } mps; struct fw_ldst_func { __u8 access_ctl; __u8 mod_index; __be16 ctl_id; __be32 offset; __be64 data0; __be64 data1; } func; struct fw_ldst_pcie { __u8 ctrl_to_fn; __u8 bnum; __u8 r; __u8 ext_r; __u8 select_naccess; __u8 pcie_fn; __be16 nset_pkd; __be32 data[12]; } pcie; struct fw_ldst_i2c_deprecated { __u8 pid_pkd; __u8 base; __u8 boffset; __u8 data; __be32 r9; } i2c_deprecated; struct fw_ldst_i2c { __u8 pid; __u8 did; __u8 boffset; __u8 blen; __be32 r9; __u8 data[48]; } i2c; struct fw_ldst_le { __be32 index; __be32 r9; __u8 val[33]; __u8 r11[7]; } le; } u; }; #define S_FW_LDST_CMD_ADDRSPACE 0 #define M_FW_LDST_CMD_ADDRSPACE 0xff #define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE) #define G_FW_LDST_CMD_ADDRSPACE(x) \ (((x) >> S_FW_LDST_CMD_ADDRSPACE) & M_FW_LDST_CMD_ADDRSPACE) #define S_FW_LDST_CMD_CYCLES 16 #define M_FW_LDST_CMD_CYCLES 0xffff #define V_FW_LDST_CMD_CYCLES(x) ((x) << S_FW_LDST_CMD_CYCLES) #define G_FW_LDST_CMD_CYCLES(x) \ (((x) >> S_FW_LDST_CMD_CYCLES) & M_FW_LDST_CMD_CYCLES) #define S_FW_LDST_CMD_MSG 31 #define M_FW_LDST_CMD_MSG 0x1 #define V_FW_LDST_CMD_MSG(x) ((x) << S_FW_LDST_CMD_MSG) #define G_FW_LDST_CMD_MSG(x) \ (((x) >> S_FW_LDST_CMD_MSG) & M_FW_LDST_CMD_MSG) #define F_FW_LDST_CMD_MSG V_FW_LDST_CMD_MSG(1U) #define S_FW_LDST_CMD_CTXTFLUSH 30 #define M_FW_LDST_CMD_CTXTFLUSH 0x1 #define V_FW_LDST_CMD_CTXTFLUSH(x) ((x) << S_FW_LDST_CMD_CTXTFLUSH) #define G_FW_LDST_CMD_CTXTFLUSH(x) \ (((x) >> S_FW_LDST_CMD_CTXTFLUSH) & M_FW_LDST_CMD_CTXTFLUSH) #define F_FW_LDST_CMD_CTXTFLUSH V_FW_LDST_CMD_CTXTFLUSH(1U) #define S_FW_LDST_CMD_PADDR 8 #define M_FW_LDST_CMD_PADDR 0x1f #define V_FW_LDST_CMD_PADDR(x) ((x) << S_FW_LDST_CMD_PADDR) #define G_FW_LDST_CMD_PADDR(x) \ (((x) >> S_FW_LDST_CMD_PADDR) & M_FW_LDST_CMD_PADDR) #define S_FW_LDST_CMD_MMD 0 #define M_FW_LDST_CMD_MMD 0x1f #define V_FW_LDST_CMD_MMD(x) ((x) << S_FW_LDST_CMD_MMD) #define G_FW_LDST_CMD_MMD(x) \ (((x) >> S_FW_LDST_CMD_MMD) & M_FW_LDST_CMD_MMD) #define S_FW_LDST_CMD_FID 15 #define M_FW_LDST_CMD_FID 0x1 #define 
V_FW_LDST_CMD_FID(x) ((x) << S_FW_LDST_CMD_FID) #define G_FW_LDST_CMD_FID(x) \ (((x) >> S_FW_LDST_CMD_FID) & M_FW_LDST_CMD_FID) #define F_FW_LDST_CMD_FID V_FW_LDST_CMD_FID(1U) #define S_FW_LDST_CMD_IDX 0 #define M_FW_LDST_CMD_IDX 0x7fff #define V_FW_LDST_CMD_IDX(x) ((x) << S_FW_LDST_CMD_IDX) #define G_FW_LDST_CMD_IDX(x) \ (((x) >> S_FW_LDST_CMD_IDX) & M_FW_LDST_CMD_IDX) #define S_FW_LDST_CMD_RPLCPF 0 #define M_FW_LDST_CMD_RPLCPF 0xff #define V_FW_LDST_CMD_RPLCPF(x) ((x) << S_FW_LDST_CMD_RPLCPF) #define G_FW_LDST_CMD_RPLCPF(x) \ (((x) >> S_FW_LDST_CMD_RPLCPF) & M_FW_LDST_CMD_RPLCPF) #define S_FW_LDST_CMD_MPSID 0 #define M_FW_LDST_CMD_MPSID 0x7fff #define V_FW_LDST_CMD_MPSID(x) ((x) << S_FW_LDST_CMD_MPSID) #define G_FW_LDST_CMD_MPSID(x) \ (((x) >> S_FW_LDST_CMD_MPSID) & M_FW_LDST_CMD_MPSID) #define S_FW_LDST_CMD_CTRL 7 #define M_FW_LDST_CMD_CTRL 0x1 #define V_FW_LDST_CMD_CTRL(x) ((x) << S_FW_LDST_CMD_CTRL) #define G_FW_LDST_CMD_CTRL(x) \ (((x) >> S_FW_LDST_CMD_CTRL) & M_FW_LDST_CMD_CTRL) #define F_FW_LDST_CMD_CTRL V_FW_LDST_CMD_CTRL(1U) #define S_FW_LDST_CMD_LC 4 #define M_FW_LDST_CMD_LC 0x1 #define V_FW_LDST_CMD_LC(x) ((x) << S_FW_LDST_CMD_LC) #define G_FW_LDST_CMD_LC(x) \ (((x) >> S_FW_LDST_CMD_LC) & M_FW_LDST_CMD_LC) #define F_FW_LDST_CMD_LC V_FW_LDST_CMD_LC(1U) #define S_FW_LDST_CMD_AI 3 #define M_FW_LDST_CMD_AI 0x1 #define V_FW_LDST_CMD_AI(x) ((x) << S_FW_LDST_CMD_AI) #define G_FW_LDST_CMD_AI(x) \ (((x) >> S_FW_LDST_CMD_AI) & M_FW_LDST_CMD_AI) #define F_FW_LDST_CMD_AI V_FW_LDST_CMD_AI(1U) #define S_FW_LDST_CMD_FN 0 #define M_FW_LDST_CMD_FN 0x7 #define V_FW_LDST_CMD_FN(x) ((x) << S_FW_LDST_CMD_FN) #define G_FW_LDST_CMD_FN(x) \ (((x) >> S_FW_LDST_CMD_FN) & M_FW_LDST_CMD_FN) #define S_FW_LDST_CMD_SELECT 4 #define M_FW_LDST_CMD_SELECT 0xf #define V_FW_LDST_CMD_SELECT(x) ((x) << S_FW_LDST_CMD_SELECT) #define G_FW_LDST_CMD_SELECT(x) \ (((x) >> S_FW_LDST_CMD_SELECT) & M_FW_LDST_CMD_SELECT) #define S_FW_LDST_CMD_NACCESS 0 #define M_FW_LDST_CMD_NACCESS 0xf #define V_FW_LDST_CMD_NACCESS(x) ((x) << S_FW_LDST_CMD_NACCESS) #define G_FW_LDST_CMD_NACCESS(x) \ (((x) >> S_FW_LDST_CMD_NACCESS) & M_FW_LDST_CMD_NACCESS) #define S_FW_LDST_CMD_NSET 14 #define M_FW_LDST_CMD_NSET 0x3 #define V_FW_LDST_CMD_NSET(x) ((x) << S_FW_LDST_CMD_NSET) #define G_FW_LDST_CMD_NSET(x) \ (((x) >> S_FW_LDST_CMD_NSET) & M_FW_LDST_CMD_NSET) #define S_FW_LDST_CMD_PID 6 #define M_FW_LDST_CMD_PID 0x3 #define V_FW_LDST_CMD_PID(x) ((x) << S_FW_LDST_CMD_PID) #define G_FW_LDST_CMD_PID(x) \ (((x) >> S_FW_LDST_CMD_PID) & M_FW_LDST_CMD_PID) struct fw_reset_cmd { __be32 op_to_write; __be32 retval_len16; __be32 val; __be32 halt_pkd; }; #define S_FW_RESET_CMD_HALT 31 #define M_FW_RESET_CMD_HALT 0x1 #define V_FW_RESET_CMD_HALT(x) ((x) << S_FW_RESET_CMD_HALT) #define G_FW_RESET_CMD_HALT(x) \ (((x) >> S_FW_RESET_CMD_HALT) & M_FW_RESET_CMD_HALT) #define F_FW_RESET_CMD_HALT V_FW_RESET_CMD_HALT(1U) enum { FW_HELLO_CMD_STAGE_OS = 0, FW_HELLO_CMD_STAGE_PREOS0 = 1, FW_HELLO_CMD_STAGE_PREOS1 = 2, FW_HELLO_CMD_STAGE_POSTOS = 3, }; struct fw_hello_cmd { __be32 op_to_write; __be32 retval_len16; __be32 err_to_clearinit; __be32 fwrev; }; #define S_FW_HELLO_CMD_ERR 31 #define M_FW_HELLO_CMD_ERR 0x1 #define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR) #define G_FW_HELLO_CMD_ERR(x) \ (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR) #define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U) #define S_FW_HELLO_CMD_INIT 30 #define M_FW_HELLO_CMD_INIT 0x1 #define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT) #define G_FW_HELLO_CMD_INIT(x) \ (((x) >> 
S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT) #define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U) #define S_FW_HELLO_CMD_MASTERDIS 29 #define M_FW_HELLO_CMD_MASTERDIS 0x1 #define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS) #define G_FW_HELLO_CMD_MASTERDIS(x) \ (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS) #define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U) #define S_FW_HELLO_CMD_MASTERFORCE 28 #define M_FW_HELLO_CMD_MASTERFORCE 0x1 #define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE) #define G_FW_HELLO_CMD_MASTERFORCE(x) \ (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE) #define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U) #define S_FW_HELLO_CMD_MBMASTER 24 #define M_FW_HELLO_CMD_MBMASTER 0xf #define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER) #define G_FW_HELLO_CMD_MBMASTER(x) \ (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER) #define S_FW_HELLO_CMD_MBASYNCNOTINT 23 #define M_FW_HELLO_CMD_MBASYNCNOTINT 0x1 #define V_FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOTINT) #define G_FW_HELLO_CMD_MBASYNCNOTINT(x) \ (((x) >> S_FW_HELLO_CMD_MBASYNCNOTINT) & M_FW_HELLO_CMD_MBASYNCNOTINT) #define F_FW_HELLO_CMD_MBASYNCNOTINT V_FW_HELLO_CMD_MBASYNCNOTINT(1U) #define S_FW_HELLO_CMD_MBASYNCNOT 20 #define M_FW_HELLO_CMD_MBASYNCNOT 0x7 #define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT) #define G_FW_HELLO_CMD_MBASYNCNOT(x) \ (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT) #define S_FW_HELLO_CMD_STAGE 17 #define M_FW_HELLO_CMD_STAGE 0x7 #define V_FW_HELLO_CMD_STAGE(x) ((x) << S_FW_HELLO_CMD_STAGE) #define G_FW_HELLO_CMD_STAGE(x) \ (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE) #define S_FW_HELLO_CMD_CLEARINIT 16 #define M_FW_HELLO_CMD_CLEARINIT 0x1 #define V_FW_HELLO_CMD_CLEARINIT(x) ((x) << S_FW_HELLO_CMD_CLEARINIT) #define G_FW_HELLO_CMD_CLEARINIT(x) \ (((x) >> S_FW_HELLO_CMD_CLEARINIT) & M_FW_HELLO_CMD_CLEARINIT) #define F_FW_HELLO_CMD_CLEARINIT V_FW_HELLO_CMD_CLEARINIT(1U) struct fw_bye_cmd { __be32 op_to_write; __be32 retval_len16; __be64 r3; }; struct fw_initialize_cmd { __be32 op_to_write; __be32 retval_len16; __be64 r3; }; enum fw_caps_config_hm { FW_CAPS_CONFIG_HM_PCIE = 0x00000001, FW_CAPS_CONFIG_HM_PL = 0x00000002, FW_CAPS_CONFIG_HM_SGE = 0x00000004, FW_CAPS_CONFIG_HM_CIM = 0x00000008, FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, FW_CAPS_CONFIG_HM_TP = 0x00000020, FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, FW_CAPS_CONFIG_HM_PMRX = 0x00000080, FW_CAPS_CONFIG_HM_PMTX = 0x00000100, FW_CAPS_CONFIG_HM_MC = 0x00000200, FW_CAPS_CONFIG_HM_LE = 0x00000400, FW_CAPS_CONFIG_HM_MPS = 0x00000800, FW_CAPS_CONFIG_HM_XGMAC = 0x00001000, FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, FW_CAPS_CONFIG_HM_MI = 0x00008000, FW_CAPS_CONFIG_HM_I2CM = 0x00010000, FW_CAPS_CONFIG_HM_NCSI = 0x00020000, FW_CAPS_CONFIG_HM_SMB = 0x00040000, FW_CAPS_CONFIG_HM_MA = 0x00080000, FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, FW_CAPS_CONFIG_HM_PMU = 0x00200000, FW_CAPS_CONFIG_HM_UART = 0x00400000, FW_CAPS_CONFIG_HM_SF = 0x00800000, }; /* * The VF Register Map. * * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local * bus module (PL) and CPU Interface Module (CIM) components are mapped via * the Slice to Module Map Table (see below) in the Physical Function Register * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base * and Offset registers in the PF Register Map. 
The MBDATA base address is * quite constrained as it determines the Mailbox Data addresses for both PFs * and VFs, and therefore must fit in both the VF and PF Register Maps without * overlapping other registers. */ #define FW_T4VF_SGE_BASE_ADDR 0x0000 #define FW_T4VF_MPS_BASE_ADDR 0x0100 #define FW_T4VF_PL_BASE_ADDR 0x0200 #define FW_T4VF_MBDATA_BASE_ADDR 0x0240 #define FW_T6VF_MBDATA_BASE_ADDR 0x0280 /* aligned to mbox size 128B */ #define FW_T4VF_CIM_BASE_ADDR 0x0300 #define FW_T4VF_REGMAP_START 0x0000 #define FW_T4VF_REGMAP_SIZE 0x0400 enum fw_caps_config_nbm { FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, }; enum fw_caps_config_link { FW_CAPS_CONFIG_LINK_PPP = 0x00000001, FW_CAPS_CONFIG_LINK_QFC = 0x00000002, FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, }; enum fw_caps_config_switch { FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, }; enum fw_caps_config_nic { FW_CAPS_CONFIG_NIC = 0x00000001, FW_CAPS_CONFIG_NIC_VM = 0x00000002, FW_CAPS_CONFIG_NIC_IDS = 0x00000004, FW_CAPS_CONFIG_NIC_UM = 0x00000008, FW_CAPS_CONFIG_NIC_UM_ISGL = 0x00000010, FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020, FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040, }; enum fw_caps_config_toe { FW_CAPS_CONFIG_TOE = 0x00000001, }; enum fw_caps_config_rdma { FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, }; enum fw_caps_config_iscsi { FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, FW_CAPS_CONFIG_ISCSI_INITIATOR_SSNOFLD = 0x00000010, FW_CAPS_CONFIG_ISCSI_TARGET_SSNOFLD = 0x00000020, FW_CAPS_CONFIG_ISCSI_T10DIF = 0x00000040, FW_CAPS_CONFIG_ISCSI_INITIATOR_CMDOFLD = 0x00000080, FW_CAPS_CONFIG_ISCSI_TARGET_CMDOFLD = 0x00000100, }; enum fw_caps_config_crypto { FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001, FW_CAPS_CONFIG_TLSKEYS = 0x00000002, FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004, }; enum fw_caps_config_fcoe { FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, FW_CAPS_CONFIG_FCOE_CTRL_OFLD = 0x00000004, FW_CAPS_CONFIG_POFCOE_INITIATOR = 0x00000008, FW_CAPS_CONFIG_POFCOE_TARGET = 0x00000010, }; enum fw_memtype_cf { FW_MEMTYPE_CF_EDC0 = FW_MEMTYPE_EDC0, FW_MEMTYPE_CF_EDC1 = FW_MEMTYPE_EDC1, FW_MEMTYPE_CF_EXTMEM = FW_MEMTYPE_EXTMEM, FW_MEMTYPE_CF_FLASH = FW_MEMTYPE_FLASH, FW_MEMTYPE_CF_INTERNAL = FW_MEMTYPE_INTERNAL, FW_MEMTYPE_CF_EXTMEM1 = FW_MEMTYPE_EXTMEM1, }; struct fw_caps_config_cmd { __be32 op_to_write; __be32 cfvalid_to_len16; __be32 r2; __be32 hwmbitmap; __be16 nbmcaps; __be16 linkcaps; __be16 switchcaps; __be16 r3; __be16 niccaps; __be16 toecaps; __be16 rdmacaps; __be16 cryptocaps; __be16 iscsicaps; __be16 fcoecaps; __be32 cfcsum; __be32 finiver; __be32 finicsum; }; #define S_FW_CAPS_CONFIG_CMD_CFVALID 27 #define M_FW_CAPS_CONFIG_CMD_CFVALID 0x1 #define V_FW_CAPS_CONFIG_CMD_CFVALID(x) ((x) << S_FW_CAPS_CONFIG_CMD_CFVALID) #define G_FW_CAPS_CONFIG_CMD_CFVALID(x) \ (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID) #define F_FW_CAPS_CONFIG_CMD_CFVALID V_FW_CAPS_CONFIG_CMD_CFVALID(1U) #define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 24 #define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 0x7 #define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) #define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \ M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) #define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16 #define 
M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff #define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) #define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \ M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) /* * params command mnemonics */ enum fw_params_mnem { FW_PARAMS_MNEM_DEV = 1, /* device params */ FW_PARAMS_MNEM_PFVF = 2, /* function params */ FW_PARAMS_MNEM_REG = 3, /* limited register access */ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ FW_PARAMS_MNEM_CHNET = 5, /* chnet params */ FW_PARAMS_MNEM_LAST }; /* * device parameters */ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs * allocated by the device's * Lookup Engine */ FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, FW_PARAMS_PARAM_DEV_INTFVER_NIC = 0x04, FW_PARAMS_PARAM_DEV_INTFVER_VNIC = 0x05, FW_PARAMS_PARAM_DEV_INTFVER_OFLD = 0x06, FW_PARAMS_PARAM_DEV_INTFVER_RI = 0x07, FW_PARAMS_PARAM_DEV_INTFVER_ISCSIPDU = 0x08, FW_PARAMS_PARAM_DEV_INTFVER_ISCSI = 0x09, FW_PARAMS_PARAM_DEV_INTFVER_FCOE = 0x0A, FW_PARAMS_PARAM_DEV_FWREV = 0x0B, FW_PARAMS_PARAM_DEV_TPREV = 0x0C, FW_PARAMS_PARAM_DEV_CF = 0x0D, FW_PARAMS_PARAM_DEV_BYPASS = 0x0E, FW_PARAMS_PARAM_DEV_PHYFW = 0x0F, FW_PARAMS_PARAM_DEV_LOAD = 0x10, FW_PARAMS_PARAM_DEV_DIAG = 0x11, FW_PARAMS_PARAM_DEV_UCLK = 0x12, /* uP clock in khz */ FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */ FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER= 0x14,/* max supported ADAPTER IRD */ FW_PARAMS_PARAM_DEV_INTFVER_FCOEPDU = 0x15, FW_PARAMS_PARAM_DEV_MCINIT = 0x16, FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, FW_PARAMS_PARAM_DEV_FWCACHE = 0x18, FW_PARAMS_PARAM_DEV_RSSINFO = 0x19, FW_PARAMS_PARAM_DEV_SCFGREV = 0x1A, FW_PARAMS_PARAM_DEV_VPDREV = 0x1B, FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C, FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D, FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E, FW_PARAMS_PARAM_DEV_TPCHMAP = 0x1F, FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20, FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21, FW_PARAMS_PARAM_DEV_RING_BACKBONE = 0x22, FW_PARAMS_PARAM_DEV_PPOD_EDRAM = 0x23, FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24, FW_PARAMS_PARAM_DEV_ADD_SMAC = 0x25, FW_PARAMS_PARAM_DEV_HPFILTER_REGION_SUPPORT = 0x26, + FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27, }; /* * dev bypass parameters; actions and modes */ enum fw_params_param_dev_bypass { /* actions */ FW_PARAMS_PARAM_DEV_BYPASS_PFAIL = 0x00, FW_PARAMS_PARAM_DEV_BYPASS_CURRENT = 0x01, /* modes */ FW_PARAMS_PARAM_DEV_BYPASS_NORMAL = 0x00, FW_PARAMS_PARAM_DEV_BYPASS_DROP = 0x1, FW_PARAMS_PARAM_DEV_BYPASS_BYPASS = 0x2, }; enum fw_params_param_dev_phyfw { FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00, FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01, }; enum fw_params_param_dev_diag { FW_PARAM_DEV_DIAG_TMP = 0x00, FW_PARAM_DEV_DIAG_VDD = 0x01, FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02, }; enum fw_params_param_dev_fwcache { FW_PARAM_DEV_FWCACHE_FLUSH = 0x00, FW_PARAM_DEV_FWCACHE_FLUSHINV = 0x01, }; /* * physical and virtual function parameters */ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, 
FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15, FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16, FW_PARAMS_PARAM_PFVF_CQ_START = 0x17, FW_PARAMS_PARAM_PFVF_CQ_END = 0x18, FW_PARAMS_PARAM_PFVF_SRQ_START = 0x19, FW_PARAMS_PARAM_PFVF_SRQ_END = 0x1A, FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, FW_PARAMS_PARAM_PFVF_VIID = 0x24, FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26, FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27, FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28, FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29, FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A, FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D, FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E, FW_PARAMS_PARAM_PFVF_ETHOFLD_START = 0x2F, FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30, FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32, FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33, FW_PARAMS_PARAM_PFVF_TLS_START = 0x34, FW_PARAMS_PARAM_PFVF_TLS_END = 0x35, FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36, FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37, FW_PARAMS_PARAM_PFVF_RSSKEYINFO = 0x38, FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39, FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, FW_PARAMS_PARAM_PFVF_PPOD_EDRAM_START = 0x3B, FW_PARAMS_PARAM_PFVF_PPOD_EDRAM_END = 0x3C, FW_PARAMS_PARAM_PFVF_MAX_PKTS_PER_ETH_TX_PKTS_WR = 0x3D, }; /* * dma queue parameters */ enum fw_params_param_dmaq { FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, FW_PARAMS_PARAM_DMAQ_IQ_INTIDX = 0x02, FW_PARAMS_PARAM_DMAQ_IQ_DCA = 0x03, FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, FW_PARAMS_PARAM_DMAQ_EQ_DCA = 0x14, FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20, FW_PARAMS_PARAM_DMAQ_FLM_DCA = 0x30 }; /* * chnet parameters */ enum fw_params_param_chnet { FW_PARAMS_PARAM_CHNET_FLAGS = 0x00, }; enum fw_params_param_chnet_flags { FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_IPV6 = 0x1, FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_DAD = 0x2, FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_MLDV2= 0x4, }; #define S_FW_PARAMS_MNEM 24 #define M_FW_PARAMS_MNEM 0xff #define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM) #define G_FW_PARAMS_MNEM(x) \ (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM) #define S_FW_PARAMS_PARAM_X 16 #define M_FW_PARAMS_PARAM_X 0xff #define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X) #define G_FW_PARAMS_PARAM_X(x) \ (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X) #define S_FW_PARAMS_PARAM_Y 8 #define M_FW_PARAMS_PARAM_Y 0xff #define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y) #define G_FW_PARAMS_PARAM_Y(x) \ (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y) #define S_FW_PARAMS_PARAM_Z 0 #define M_FW_PARAMS_PARAM_Z 0xff #define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z) #define G_FW_PARAMS_PARAM_Z(x) \ (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z) #define S_FW_PARAMS_PARAM_XYZ 0 #define M_FW_PARAMS_PARAM_XYZ 0xffffff #define V_FW_PARAMS_PARAM_XYZ(x) ((x) << 
S_FW_PARAMS_PARAM_XYZ) #define G_FW_PARAMS_PARAM_XYZ(x) \ (((x) >> S_FW_PARAMS_PARAM_XYZ) & M_FW_PARAMS_PARAM_XYZ) #define S_FW_PARAMS_PARAM_YZ 0 #define M_FW_PARAMS_PARAM_YZ 0xffff #define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ) #define G_FW_PARAMS_PARAM_YZ(x) \ (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ) #define S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN 31 #define M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN 0x1 #define V_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN) #define G_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN) & \ M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN) #define S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT 24 #define M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT 0x3 #define V_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT) #define G_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT) & \ M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT) #define S_FW_PARAMS_PARAM_DMAQ_DCA_ST 0 #define M_FW_PARAMS_PARAM_DMAQ_DCA_ST 0x7ff #define V_FW_PARAMS_PARAM_DMAQ_DCA_ST(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_DCA_ST) #define G_FW_PARAMS_PARAM_DMAQ_DCA_ST(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_ST) & M_FW_PARAMS_PARAM_DMAQ_DCA_ST) #define S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE 29 #define M_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE 0x7 #define V_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE) #define G_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE) & \ M_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE) #define S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX 0 #define M_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX 0x3ff #define V_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX) #define G_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX) & \ M_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX) struct fw_params_cmd { __be32 op_to_vfn; __be32 retval_len16; struct fw_params_param { __be32 mnem; __be32 val; } param[7]; }; #define S_FW_PARAMS_CMD_PFN 8 #define M_FW_PARAMS_CMD_PFN 0x7 #define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN) #define G_FW_PARAMS_CMD_PFN(x) \ (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN) #define S_FW_PARAMS_CMD_VFN 0 #define M_FW_PARAMS_CMD_VFN 0xff #define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN) #define G_FW_PARAMS_CMD_VFN(x) \ (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN) struct fw_pfvf_cmd { __be32 op_to_vfn; __be32 retval_len16; __be32 niqflint_niq; __be32 type_to_neq; __be32 tc_to_nexactf; __be32 r_caps_to_nethctrl; __be16 nricq; __be16 nriqp; __be32 r4; }; #define S_FW_PFVF_CMD_PFN 8 #define M_FW_PFVF_CMD_PFN 0x7 #define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN) #define G_FW_PFVF_CMD_PFN(x) \ (((x) >> S_FW_PFVF_CMD_PFN) & M_FW_PFVF_CMD_PFN) #define S_FW_PFVF_CMD_VFN 0 #define M_FW_PFVF_CMD_VFN 0xff #define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN) #define G_FW_PFVF_CMD_VFN(x) \ (((x) >> S_FW_PFVF_CMD_VFN) & M_FW_PFVF_CMD_VFN) #define S_FW_PFVF_CMD_NIQFLINT 20 #define M_FW_PFVF_CMD_NIQFLINT 0xfff #define V_FW_PFVF_CMD_NIQFLINT(x) ((x) << S_FW_PFVF_CMD_NIQFLINT) #define G_FW_PFVF_CMD_NIQFLINT(x) \ (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT) #define S_FW_PFVF_CMD_NIQ 0 #define M_FW_PFVF_CMD_NIQ 0xfffff #define V_FW_PFVF_CMD_NIQ(x) ((x) << S_FW_PFVF_CMD_NIQ) #define G_FW_PFVF_CMD_NIQ(x) \ (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ) #define S_FW_PFVF_CMD_TYPE 31 #define M_FW_PFVF_CMD_TYPE 0x1 #define V_FW_PFVF_CMD_TYPE(x) 
((x) << S_FW_PFVF_CMD_TYPE) #define G_FW_PFVF_CMD_TYPE(x) \ (((x) >> S_FW_PFVF_CMD_TYPE) & M_FW_PFVF_CMD_TYPE) #define F_FW_PFVF_CMD_TYPE V_FW_PFVF_CMD_TYPE(1U) #define S_FW_PFVF_CMD_CMASK 24 #define M_FW_PFVF_CMD_CMASK 0xf #define V_FW_PFVF_CMD_CMASK(x) ((x) << S_FW_PFVF_CMD_CMASK) #define G_FW_PFVF_CMD_CMASK(x) \ (((x) >> S_FW_PFVF_CMD_CMASK) & M_FW_PFVF_CMD_CMASK) #define S_FW_PFVF_CMD_PMASK 20 #define M_FW_PFVF_CMD_PMASK 0xf #define V_FW_PFVF_CMD_PMASK(x) ((x) << S_FW_PFVF_CMD_PMASK) #define G_FW_PFVF_CMD_PMASK(x) \ (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK) #define S_FW_PFVF_CMD_NEQ 0 #define M_FW_PFVF_CMD_NEQ 0xfffff #define V_FW_PFVF_CMD_NEQ(x) ((x) << S_FW_PFVF_CMD_NEQ) #define G_FW_PFVF_CMD_NEQ(x) \ (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ) #define S_FW_PFVF_CMD_TC 24 #define M_FW_PFVF_CMD_TC 0xff #define V_FW_PFVF_CMD_TC(x) ((x) << S_FW_PFVF_CMD_TC) #define G_FW_PFVF_CMD_TC(x) \ (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC) #define S_FW_PFVF_CMD_NVI 16 #define M_FW_PFVF_CMD_NVI 0xff #define V_FW_PFVF_CMD_NVI(x) ((x) << S_FW_PFVF_CMD_NVI) #define G_FW_PFVF_CMD_NVI(x) \ (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI) #define S_FW_PFVF_CMD_NEXACTF 0 #define M_FW_PFVF_CMD_NEXACTF 0xffff #define V_FW_PFVF_CMD_NEXACTF(x) ((x) << S_FW_PFVF_CMD_NEXACTF) #define G_FW_PFVF_CMD_NEXACTF(x) \ (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF) #define S_FW_PFVF_CMD_R_CAPS 24 #define M_FW_PFVF_CMD_R_CAPS 0xff #define V_FW_PFVF_CMD_R_CAPS(x) ((x) << S_FW_PFVF_CMD_R_CAPS) #define G_FW_PFVF_CMD_R_CAPS(x) \ (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS) #define S_FW_PFVF_CMD_WX_CAPS 16 #define M_FW_PFVF_CMD_WX_CAPS 0xff #define V_FW_PFVF_CMD_WX_CAPS(x) ((x) << S_FW_PFVF_CMD_WX_CAPS) #define G_FW_PFVF_CMD_WX_CAPS(x) \ (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS) #define S_FW_PFVF_CMD_NETHCTRL 0 #define M_FW_PFVF_CMD_NETHCTRL 0xffff #define V_FW_PFVF_CMD_NETHCTRL(x) ((x) << S_FW_PFVF_CMD_NETHCTRL) #define G_FW_PFVF_CMD_NETHCTRL(x) \ (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL) /* * ingress queue type; the first 1K ingress queues can have associated 0, * 1 or 2 free lists and an interrupt, all other ingress queues lack these * capabilities */ enum fw_iq_type { FW_IQ_TYPE_FL_INT_CAP, FW_IQ_TYPE_NO_FL_INT_CAP, FW_IQ_TYPE_VF_CQ }; enum fw_iq_iqtype { FW_IQ_IQTYPE_OTHER, FW_IQ_IQTYPE_NIC, FW_IQ_IQTYPE_OFLD, }; struct fw_iq_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be16 physiqid; __be16 iqid; __be16 fl0id; __be16 fl1id; __be32 type_to_iqandstindex; __be16 iqdroprss_to_iqesize; __be16 iqsize; __be64 iqaddr; __be32 iqns_to_fl0congen; __be16 fl0dcaen_to_fl0cidxfthresh; __be16 fl0size; __be64 fl0addr; __be32 fl1cngchmap_to_fl1congen; __be16 fl1dcaen_to_fl1cidxfthresh; __be16 fl1size; __be64 fl1addr; }; #define S_FW_IQ_CMD_PFN 8 #define M_FW_IQ_CMD_PFN 0x7 #define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN) #define G_FW_IQ_CMD_PFN(x) \ (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN) #define S_FW_IQ_CMD_VFN 0 #define M_FW_IQ_CMD_VFN 0xff #define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN) #define G_FW_IQ_CMD_VFN(x) \ (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN) #define S_FW_IQ_CMD_ALLOC 31 #define M_FW_IQ_CMD_ALLOC 0x1 #define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC) #define G_FW_IQ_CMD_ALLOC(x) \ (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC) #define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U) #define S_FW_IQ_CMD_FREE 30 #define M_FW_IQ_CMD_FREE 0x1 #define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE) #define G_FW_IQ_CMD_FREE(x) \ (((x) >> 
S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE) #define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U) #define S_FW_IQ_CMD_MODIFY 29 #define M_FW_IQ_CMD_MODIFY 0x1 #define V_FW_IQ_CMD_MODIFY(x) ((x) << S_FW_IQ_CMD_MODIFY) #define G_FW_IQ_CMD_MODIFY(x) \ (((x) >> S_FW_IQ_CMD_MODIFY) & M_FW_IQ_CMD_MODIFY) #define F_FW_IQ_CMD_MODIFY V_FW_IQ_CMD_MODIFY(1U) #define S_FW_IQ_CMD_IQSTART 28 #define M_FW_IQ_CMD_IQSTART 0x1 #define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART) #define G_FW_IQ_CMD_IQSTART(x) \ (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART) #define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U) #define S_FW_IQ_CMD_IQSTOP 27 #define M_FW_IQ_CMD_IQSTOP 0x1 #define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP) #define G_FW_IQ_CMD_IQSTOP(x) \ (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP) #define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U) #define S_FW_IQ_CMD_TYPE 29 #define M_FW_IQ_CMD_TYPE 0x7 #define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE) #define G_FW_IQ_CMD_TYPE(x) \ (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE) #define S_FW_IQ_CMD_IQASYNCH 28 #define M_FW_IQ_CMD_IQASYNCH 0x1 #define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH) #define G_FW_IQ_CMD_IQASYNCH(x) \ (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH) #define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U) #define S_FW_IQ_CMD_VIID 16 #define M_FW_IQ_CMD_VIID 0xfff #define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID) #define G_FW_IQ_CMD_VIID(x) \ (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID) #define S_FW_IQ_CMD_IQANDST 15 #define M_FW_IQ_CMD_IQANDST 0x1 #define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST) #define G_FW_IQ_CMD_IQANDST(x) \ (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST) #define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U) #define S_FW_IQ_CMD_IQANUS 14 #define M_FW_IQ_CMD_IQANUS 0x1 #define V_FW_IQ_CMD_IQANUS(x) ((x) << S_FW_IQ_CMD_IQANUS) #define G_FW_IQ_CMD_IQANUS(x) \ (((x) >> S_FW_IQ_CMD_IQANUS) & M_FW_IQ_CMD_IQANUS) #define F_FW_IQ_CMD_IQANUS V_FW_IQ_CMD_IQANUS(1U) #define S_FW_IQ_CMD_IQANUD 12 #define M_FW_IQ_CMD_IQANUD 0x3 #define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD) #define G_FW_IQ_CMD_IQANUD(x) \ (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD) #define S_FW_IQ_CMD_IQANDSTINDEX 0 #define M_FW_IQ_CMD_IQANDSTINDEX 0xfff #define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX) #define G_FW_IQ_CMD_IQANDSTINDEX(x) \ (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX) #define S_FW_IQ_CMD_IQDROPRSS 15 #define M_FW_IQ_CMD_IQDROPRSS 0x1 #define V_FW_IQ_CMD_IQDROPRSS(x) ((x) << S_FW_IQ_CMD_IQDROPRSS) #define G_FW_IQ_CMD_IQDROPRSS(x) \ (((x) >> S_FW_IQ_CMD_IQDROPRSS) & M_FW_IQ_CMD_IQDROPRSS) #define F_FW_IQ_CMD_IQDROPRSS V_FW_IQ_CMD_IQDROPRSS(1U) #define S_FW_IQ_CMD_IQGTSMODE 14 #define M_FW_IQ_CMD_IQGTSMODE 0x1 #define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE) #define G_FW_IQ_CMD_IQGTSMODE(x) \ (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE) #define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U) #define S_FW_IQ_CMD_IQPCIECH 12 #define M_FW_IQ_CMD_IQPCIECH 0x3 #define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH) #define G_FW_IQ_CMD_IQPCIECH(x) \ (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH) #define S_FW_IQ_CMD_IQDCAEN 11 #define M_FW_IQ_CMD_IQDCAEN 0x1 #define V_FW_IQ_CMD_IQDCAEN(x) ((x) << S_FW_IQ_CMD_IQDCAEN) #define G_FW_IQ_CMD_IQDCAEN(x) \ (((x) >> S_FW_IQ_CMD_IQDCAEN) & M_FW_IQ_CMD_IQDCAEN) #define F_FW_IQ_CMD_IQDCAEN V_FW_IQ_CMD_IQDCAEN(1U) #define S_FW_IQ_CMD_IQDCACPU 6 #define M_FW_IQ_CMD_IQDCACPU 
0x1f #define V_FW_IQ_CMD_IQDCACPU(x) ((x) << S_FW_IQ_CMD_IQDCACPU) #define G_FW_IQ_CMD_IQDCACPU(x) \ (((x) >> S_FW_IQ_CMD_IQDCACPU) & M_FW_IQ_CMD_IQDCACPU) #define S_FW_IQ_CMD_IQINTCNTTHRESH 4 #define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3 #define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH) #define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \ (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH) #define S_FW_IQ_CMD_IQO 3 #define M_FW_IQ_CMD_IQO 0x1 #define V_FW_IQ_CMD_IQO(x) ((x) << S_FW_IQ_CMD_IQO) #define G_FW_IQ_CMD_IQO(x) \ (((x) >> S_FW_IQ_CMD_IQO) & M_FW_IQ_CMD_IQO) #define F_FW_IQ_CMD_IQO V_FW_IQ_CMD_IQO(1U) #define S_FW_IQ_CMD_IQCPRIO 2 #define M_FW_IQ_CMD_IQCPRIO 0x1 #define V_FW_IQ_CMD_IQCPRIO(x) ((x) << S_FW_IQ_CMD_IQCPRIO) #define G_FW_IQ_CMD_IQCPRIO(x) \ (((x) >> S_FW_IQ_CMD_IQCPRIO) & M_FW_IQ_CMD_IQCPRIO) #define F_FW_IQ_CMD_IQCPRIO V_FW_IQ_CMD_IQCPRIO(1U) #define S_FW_IQ_CMD_IQESIZE 0 #define M_FW_IQ_CMD_IQESIZE 0x3 #define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE) #define G_FW_IQ_CMD_IQESIZE(x) \ (((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE) #define S_FW_IQ_CMD_IQNS 31 #define M_FW_IQ_CMD_IQNS 0x1 #define V_FW_IQ_CMD_IQNS(x) ((x) << S_FW_IQ_CMD_IQNS) #define G_FW_IQ_CMD_IQNS(x) \ (((x) >> S_FW_IQ_CMD_IQNS) & M_FW_IQ_CMD_IQNS) #define F_FW_IQ_CMD_IQNS V_FW_IQ_CMD_IQNS(1U) #define S_FW_IQ_CMD_IQRO 30 #define M_FW_IQ_CMD_IQRO 0x1 #define V_FW_IQ_CMD_IQRO(x) ((x) << S_FW_IQ_CMD_IQRO) #define G_FW_IQ_CMD_IQRO(x) \ (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO) #define F_FW_IQ_CMD_IQRO V_FW_IQ_CMD_IQRO(1U) #define S_FW_IQ_CMD_IQFLINTIQHSEN 28 #define M_FW_IQ_CMD_IQFLINTIQHSEN 0x3 #define V_FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << S_FW_IQ_CMD_IQFLINTIQHSEN) #define G_FW_IQ_CMD_IQFLINTIQHSEN(x) \ (((x) >> S_FW_IQ_CMD_IQFLINTIQHSEN) & M_FW_IQ_CMD_IQFLINTIQHSEN) #define S_FW_IQ_CMD_IQFLINTCONGEN 27 #define M_FW_IQ_CMD_IQFLINTCONGEN 0x1 #define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN) #define G_FW_IQ_CMD_IQFLINTCONGEN(x) \ (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN) #define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U) #define S_FW_IQ_CMD_IQFLINTISCSIC 26 #define M_FW_IQ_CMD_IQFLINTISCSIC 0x1 #define V_FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << S_FW_IQ_CMD_IQFLINTISCSIC) #define G_FW_IQ_CMD_IQFLINTISCSIC(x) \ (((x) >> S_FW_IQ_CMD_IQFLINTISCSIC) & M_FW_IQ_CMD_IQFLINTISCSIC) #define F_FW_IQ_CMD_IQFLINTISCSIC V_FW_IQ_CMD_IQFLINTISCSIC(1U) #define S_FW_IQ_CMD_IQTYPE 24 #define M_FW_IQ_CMD_IQTYPE 0x3 #define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE) #define G_FW_IQ_CMD_IQTYPE(x) \ (((x) >> S_FW_IQ_CMD_IQTYPE) & M_FW_IQ_CMD_IQTYPE) #define S_FW_IQ_CMD_FL0CNGCHMAP 20 #define M_FW_IQ_CMD_FL0CNGCHMAP 0xf #define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP) #define G_FW_IQ_CMD_FL0CNGCHMAP(x) \ (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP) #define S_FW_IQ_CMD_FL0CONGDROP 16 #define M_FW_IQ_CMD_FL0CONGDROP 0x1 #define V_FW_IQ_CMD_FL0CONGDROP(x) ((x) << S_FW_IQ_CMD_FL0CONGDROP) #define G_FW_IQ_CMD_FL0CONGDROP(x) \ (((x) >> S_FW_IQ_CMD_FL0CONGDROP) & M_FW_IQ_CMD_FL0CONGDROP) #define F_FW_IQ_CMD_FL0CONGDROP V_FW_IQ_CMD_FL0CONGDROP(1U) #define S_FW_IQ_CMD_FL0CACHELOCK 15 #define M_FW_IQ_CMD_FL0CACHELOCK 0x1 #define V_FW_IQ_CMD_FL0CACHELOCK(x) ((x) << S_FW_IQ_CMD_FL0CACHELOCK) #define G_FW_IQ_CMD_FL0CACHELOCK(x) \ (((x) >> S_FW_IQ_CMD_FL0CACHELOCK) & M_FW_IQ_CMD_FL0CACHELOCK) #define F_FW_IQ_CMD_FL0CACHELOCK V_FW_IQ_CMD_FL0CACHELOCK(1U) #define S_FW_IQ_CMD_FL0DBP 14 #define M_FW_IQ_CMD_FL0DBP 
0x1 #define V_FW_IQ_CMD_FL0DBP(x) ((x) << S_FW_IQ_CMD_FL0DBP) #define G_FW_IQ_CMD_FL0DBP(x) \ (((x) >> S_FW_IQ_CMD_FL0DBP) & M_FW_IQ_CMD_FL0DBP) #define F_FW_IQ_CMD_FL0DBP V_FW_IQ_CMD_FL0DBP(1U) #define S_FW_IQ_CMD_FL0DATANS 13 #define M_FW_IQ_CMD_FL0DATANS 0x1 #define V_FW_IQ_CMD_FL0DATANS(x) ((x) << S_FW_IQ_CMD_FL0DATANS) #define G_FW_IQ_CMD_FL0DATANS(x) \ (((x) >> S_FW_IQ_CMD_FL0DATANS) & M_FW_IQ_CMD_FL0DATANS) #define F_FW_IQ_CMD_FL0DATANS V_FW_IQ_CMD_FL0DATANS(1U) #define S_FW_IQ_CMD_FL0DATARO 12 #define M_FW_IQ_CMD_FL0DATARO 0x1 #define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO) #define G_FW_IQ_CMD_FL0DATARO(x) \ (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO) #define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U) #define S_FW_IQ_CMD_FL0CONGCIF 11 #define M_FW_IQ_CMD_FL0CONGCIF 0x1 #define V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF) #define G_FW_IQ_CMD_FL0CONGCIF(x) \ (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF) #define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U) #define S_FW_IQ_CMD_FL0ONCHIP 10 #define M_FW_IQ_CMD_FL0ONCHIP 0x1 #define V_FW_IQ_CMD_FL0ONCHIP(x) ((x) << S_FW_IQ_CMD_FL0ONCHIP) #define G_FW_IQ_CMD_FL0ONCHIP(x) \ (((x) >> S_FW_IQ_CMD_FL0ONCHIP) & M_FW_IQ_CMD_FL0ONCHIP) #define F_FW_IQ_CMD_FL0ONCHIP V_FW_IQ_CMD_FL0ONCHIP(1U) #define S_FW_IQ_CMD_FL0STATUSPGNS 9 #define M_FW_IQ_CMD_FL0STATUSPGNS 0x1 #define V_FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << S_FW_IQ_CMD_FL0STATUSPGNS) #define G_FW_IQ_CMD_FL0STATUSPGNS(x) \ (((x) >> S_FW_IQ_CMD_FL0STATUSPGNS) & M_FW_IQ_CMD_FL0STATUSPGNS) #define F_FW_IQ_CMD_FL0STATUSPGNS V_FW_IQ_CMD_FL0STATUSPGNS(1U) #define S_FW_IQ_CMD_FL0STATUSPGRO 8 #define M_FW_IQ_CMD_FL0STATUSPGRO 0x1 #define V_FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << S_FW_IQ_CMD_FL0STATUSPGRO) #define G_FW_IQ_CMD_FL0STATUSPGRO(x) \ (((x) >> S_FW_IQ_CMD_FL0STATUSPGRO) & M_FW_IQ_CMD_FL0STATUSPGRO) #define F_FW_IQ_CMD_FL0STATUSPGRO V_FW_IQ_CMD_FL0STATUSPGRO(1U) #define S_FW_IQ_CMD_FL0FETCHNS 7 #define M_FW_IQ_CMD_FL0FETCHNS 0x1 #define V_FW_IQ_CMD_FL0FETCHNS(x) ((x) << S_FW_IQ_CMD_FL0FETCHNS) #define G_FW_IQ_CMD_FL0FETCHNS(x) \ (((x) >> S_FW_IQ_CMD_FL0FETCHNS) & M_FW_IQ_CMD_FL0FETCHNS) #define F_FW_IQ_CMD_FL0FETCHNS V_FW_IQ_CMD_FL0FETCHNS(1U) #define S_FW_IQ_CMD_FL0FETCHRO 6 #define M_FW_IQ_CMD_FL0FETCHRO 0x1 #define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO) #define G_FW_IQ_CMD_FL0FETCHRO(x) \ (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO) #define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U) #define S_FW_IQ_CMD_FL0HOSTFCMODE 4 #define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3 #define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE) #define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \ (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE) #define S_FW_IQ_CMD_FL0CPRIO 3 #define M_FW_IQ_CMD_FL0CPRIO 0x1 #define V_FW_IQ_CMD_FL0CPRIO(x) ((x) << S_FW_IQ_CMD_FL0CPRIO) #define G_FW_IQ_CMD_FL0CPRIO(x) \ (((x) >> S_FW_IQ_CMD_FL0CPRIO) & M_FW_IQ_CMD_FL0CPRIO) #define F_FW_IQ_CMD_FL0CPRIO V_FW_IQ_CMD_FL0CPRIO(1U) #define S_FW_IQ_CMD_FL0PADEN 2 #define M_FW_IQ_CMD_FL0PADEN 0x1 #define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN) #define G_FW_IQ_CMD_FL0PADEN(x) \ (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN) #define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U) #define S_FW_IQ_CMD_FL0PACKEN 1 #define M_FW_IQ_CMD_FL0PACKEN 0x1 #define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN) #define G_FW_IQ_CMD_FL0PACKEN(x) \ (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN) #define 
F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U) #define S_FW_IQ_CMD_FL0CONGEN 0 #define M_FW_IQ_CMD_FL0CONGEN 0x1 #define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN) #define G_FW_IQ_CMD_FL0CONGEN(x) \ (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN) #define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U) #define S_FW_IQ_CMD_FL0DCAEN 15 #define M_FW_IQ_CMD_FL0DCAEN 0x1 #define V_FW_IQ_CMD_FL0DCAEN(x) ((x) << S_FW_IQ_CMD_FL0DCAEN) #define G_FW_IQ_CMD_FL0DCAEN(x) \ (((x) >> S_FW_IQ_CMD_FL0DCAEN) & M_FW_IQ_CMD_FL0DCAEN) #define F_FW_IQ_CMD_FL0DCAEN V_FW_IQ_CMD_FL0DCAEN(1U) #define S_FW_IQ_CMD_FL0DCACPU 10 #define M_FW_IQ_CMD_FL0DCACPU 0x1f #define V_FW_IQ_CMD_FL0DCACPU(x) ((x) << S_FW_IQ_CMD_FL0DCACPU) #define G_FW_IQ_CMD_FL0DCACPU(x) \ (((x) >> S_FW_IQ_CMD_FL0DCACPU) & M_FW_IQ_CMD_FL0DCACPU) #define S_FW_IQ_CMD_FL0FBMIN 7 #define M_FW_IQ_CMD_FL0FBMIN 0x7 #define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN) #define G_FW_IQ_CMD_FL0FBMIN(x) \ (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN) #define S_FW_IQ_CMD_FL0FBMAX 4 #define M_FW_IQ_CMD_FL0FBMAX 0x7 #define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX) #define G_FW_IQ_CMD_FL0FBMAX(x) \ (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX) #define S_FW_IQ_CMD_FL0CIDXFTHRESHO 3 #define M_FW_IQ_CMD_FL0CIDXFTHRESHO 0x1 #define V_FW_IQ_CMD_FL0CIDXFTHRESHO(x) ((x) << S_FW_IQ_CMD_FL0CIDXFTHRESHO) #define G_FW_IQ_CMD_FL0CIDXFTHRESHO(x) \ (((x) >> S_FW_IQ_CMD_FL0CIDXFTHRESHO) & M_FW_IQ_CMD_FL0CIDXFTHRESHO) #define F_FW_IQ_CMD_FL0CIDXFTHRESHO V_FW_IQ_CMD_FL0CIDXFTHRESHO(1U) #define S_FW_IQ_CMD_FL0CIDXFTHRESH 0 #define M_FW_IQ_CMD_FL0CIDXFTHRESH 0x7 #define V_FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << S_FW_IQ_CMD_FL0CIDXFTHRESH) #define G_FW_IQ_CMD_FL0CIDXFTHRESH(x) \ (((x) >> S_FW_IQ_CMD_FL0CIDXFTHRESH) & M_FW_IQ_CMD_FL0CIDXFTHRESH) #define S_FW_IQ_CMD_FL1CNGCHMAP 20 #define M_FW_IQ_CMD_FL1CNGCHMAP 0xf #define V_FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL1CNGCHMAP) #define G_FW_IQ_CMD_FL1CNGCHMAP(x) \ (((x) >> S_FW_IQ_CMD_FL1CNGCHMAP) & M_FW_IQ_CMD_FL1CNGCHMAP) #define S_FW_IQ_CMD_FL1CONGDROP 16 #define M_FW_IQ_CMD_FL1CONGDROP 0x1 #define V_FW_IQ_CMD_FL1CONGDROP(x) ((x) << S_FW_IQ_CMD_FL1CONGDROP) #define G_FW_IQ_CMD_FL1CONGDROP(x) \ (((x) >> S_FW_IQ_CMD_FL1CONGDROP) & M_FW_IQ_CMD_FL1CONGDROP) #define F_FW_IQ_CMD_FL1CONGDROP V_FW_IQ_CMD_FL1CONGDROP(1U) #define S_FW_IQ_CMD_FL1CACHELOCK 15 #define M_FW_IQ_CMD_FL1CACHELOCK 0x1 #define V_FW_IQ_CMD_FL1CACHELOCK(x) ((x) << S_FW_IQ_CMD_FL1CACHELOCK) #define G_FW_IQ_CMD_FL1CACHELOCK(x) \ (((x) >> S_FW_IQ_CMD_FL1CACHELOCK) & M_FW_IQ_CMD_FL1CACHELOCK) #define F_FW_IQ_CMD_FL1CACHELOCK V_FW_IQ_CMD_FL1CACHELOCK(1U) #define S_FW_IQ_CMD_FL1DBP 14 #define M_FW_IQ_CMD_FL1DBP 0x1 #define V_FW_IQ_CMD_FL1DBP(x) ((x) << S_FW_IQ_CMD_FL1DBP) #define G_FW_IQ_CMD_FL1DBP(x) \ (((x) >> S_FW_IQ_CMD_FL1DBP) & M_FW_IQ_CMD_FL1DBP) #define F_FW_IQ_CMD_FL1DBP V_FW_IQ_CMD_FL1DBP(1U) #define S_FW_IQ_CMD_FL1DATANS 13 #define M_FW_IQ_CMD_FL1DATANS 0x1 #define V_FW_IQ_CMD_FL1DATANS(x) ((x) << S_FW_IQ_CMD_FL1DATANS) #define G_FW_IQ_CMD_FL1DATANS(x) \ (((x) >> S_FW_IQ_CMD_FL1DATANS) & M_FW_IQ_CMD_FL1DATANS) #define F_FW_IQ_CMD_FL1DATANS V_FW_IQ_CMD_FL1DATANS(1U) #define S_FW_IQ_CMD_FL1DATARO 12 #define M_FW_IQ_CMD_FL1DATARO 0x1 #define V_FW_IQ_CMD_FL1DATARO(x) ((x) << S_FW_IQ_CMD_FL1DATARO) #define G_FW_IQ_CMD_FL1DATARO(x) \ (((x) >> S_FW_IQ_CMD_FL1DATARO) & M_FW_IQ_CMD_FL1DATARO) #define F_FW_IQ_CMD_FL1DATARO V_FW_IQ_CMD_FL1DATARO(1U) #define S_FW_IQ_CMD_FL1CONGCIF 11 #define M_FW_IQ_CMD_FL1CONGCIF 
0x1 #define V_FW_IQ_CMD_FL1CONGCIF(x) ((x) << S_FW_IQ_CMD_FL1CONGCIF) #define G_FW_IQ_CMD_FL1CONGCIF(x) \ (((x) >> S_FW_IQ_CMD_FL1CONGCIF) & M_FW_IQ_CMD_FL1CONGCIF) #define F_FW_IQ_CMD_FL1CONGCIF V_FW_IQ_CMD_FL1CONGCIF(1U) #define S_FW_IQ_CMD_FL1ONCHIP 10 #define M_FW_IQ_CMD_FL1ONCHIP 0x1 #define V_FW_IQ_CMD_FL1ONCHIP(x) ((x) << S_FW_IQ_CMD_FL1ONCHIP) #define G_FW_IQ_CMD_FL1ONCHIP(x) \ (((x) >> S_FW_IQ_CMD_FL1ONCHIP) & M_FW_IQ_CMD_FL1ONCHIP) #define F_FW_IQ_CMD_FL1ONCHIP V_FW_IQ_CMD_FL1ONCHIP(1U) #define S_FW_IQ_CMD_FL1STATUSPGNS 9 #define M_FW_IQ_CMD_FL1STATUSPGNS 0x1 #define V_FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << S_FW_IQ_CMD_FL1STATUSPGNS) #define G_FW_IQ_CMD_FL1STATUSPGNS(x) \ (((x) >> S_FW_IQ_CMD_FL1STATUSPGNS) & M_FW_IQ_CMD_FL1STATUSPGNS) #define F_FW_IQ_CMD_FL1STATUSPGNS V_FW_IQ_CMD_FL1STATUSPGNS(1U) #define S_FW_IQ_CMD_FL1STATUSPGRO 8 #define M_FW_IQ_CMD_FL1STATUSPGRO 0x1 #define V_FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << S_FW_IQ_CMD_FL1STATUSPGRO) #define G_FW_IQ_CMD_FL1STATUSPGRO(x) \ (((x) >> S_FW_IQ_CMD_FL1STATUSPGRO) & M_FW_IQ_CMD_FL1STATUSPGRO) #define F_FW_IQ_CMD_FL1STATUSPGRO V_FW_IQ_CMD_FL1STATUSPGRO(1U) #define S_FW_IQ_CMD_FL1FETCHNS 7 #define M_FW_IQ_CMD_FL1FETCHNS 0x1 #define V_FW_IQ_CMD_FL1FETCHNS(x) ((x) << S_FW_IQ_CMD_FL1FETCHNS) #define G_FW_IQ_CMD_FL1FETCHNS(x) \ (((x) >> S_FW_IQ_CMD_FL1FETCHNS) & M_FW_IQ_CMD_FL1FETCHNS) #define F_FW_IQ_CMD_FL1FETCHNS V_FW_IQ_CMD_FL1FETCHNS(1U) #define S_FW_IQ_CMD_FL1FETCHRO 6 #define M_FW_IQ_CMD_FL1FETCHRO 0x1 #define V_FW_IQ_CMD_FL1FETCHRO(x) ((x) << S_FW_IQ_CMD_FL1FETCHRO) #define G_FW_IQ_CMD_FL1FETCHRO(x) \ (((x) >> S_FW_IQ_CMD_FL1FETCHRO) & M_FW_IQ_CMD_FL1FETCHRO) #define F_FW_IQ_CMD_FL1FETCHRO V_FW_IQ_CMD_FL1FETCHRO(1U) #define S_FW_IQ_CMD_FL1HOSTFCMODE 4 #define M_FW_IQ_CMD_FL1HOSTFCMODE 0x3 #define V_FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL1HOSTFCMODE) #define G_FW_IQ_CMD_FL1HOSTFCMODE(x) \ (((x) >> S_FW_IQ_CMD_FL1HOSTFCMODE) & M_FW_IQ_CMD_FL1HOSTFCMODE) #define S_FW_IQ_CMD_FL1CPRIO 3 #define M_FW_IQ_CMD_FL1CPRIO 0x1 #define V_FW_IQ_CMD_FL1CPRIO(x) ((x) << S_FW_IQ_CMD_FL1CPRIO) #define G_FW_IQ_CMD_FL1CPRIO(x) \ (((x) >> S_FW_IQ_CMD_FL1CPRIO) & M_FW_IQ_CMD_FL1CPRIO) #define F_FW_IQ_CMD_FL1CPRIO V_FW_IQ_CMD_FL1CPRIO(1U) #define S_FW_IQ_CMD_FL1PADEN 2 #define M_FW_IQ_CMD_FL1PADEN 0x1 #define V_FW_IQ_CMD_FL1PADEN(x) ((x) << S_FW_IQ_CMD_FL1PADEN) #define G_FW_IQ_CMD_FL1PADEN(x) \ (((x) >> S_FW_IQ_CMD_FL1PADEN) & M_FW_IQ_CMD_FL1PADEN) #define F_FW_IQ_CMD_FL1PADEN V_FW_IQ_CMD_FL1PADEN(1U) #define S_FW_IQ_CMD_FL1PACKEN 1 #define M_FW_IQ_CMD_FL1PACKEN 0x1 #define V_FW_IQ_CMD_FL1PACKEN(x) ((x) << S_FW_IQ_CMD_FL1PACKEN) #define G_FW_IQ_CMD_FL1PACKEN(x) \ (((x) >> S_FW_IQ_CMD_FL1PACKEN) & M_FW_IQ_CMD_FL1PACKEN) #define F_FW_IQ_CMD_FL1PACKEN V_FW_IQ_CMD_FL1PACKEN(1U) #define S_FW_IQ_CMD_FL1CONGEN 0 #define M_FW_IQ_CMD_FL1CONGEN 0x1 #define V_FW_IQ_CMD_FL1CONGEN(x) ((x) << S_FW_IQ_CMD_FL1CONGEN) #define G_FW_IQ_CMD_FL1CONGEN(x) \ (((x) >> S_FW_IQ_CMD_FL1CONGEN) & M_FW_IQ_CMD_FL1CONGEN) #define F_FW_IQ_CMD_FL1CONGEN V_FW_IQ_CMD_FL1CONGEN(1U) #define S_FW_IQ_CMD_FL1DCAEN 15 #define M_FW_IQ_CMD_FL1DCAEN 0x1 #define V_FW_IQ_CMD_FL1DCAEN(x) ((x) << S_FW_IQ_CMD_FL1DCAEN) #define G_FW_IQ_CMD_FL1DCAEN(x) \ (((x) >> S_FW_IQ_CMD_FL1DCAEN) & M_FW_IQ_CMD_FL1DCAEN) #define F_FW_IQ_CMD_FL1DCAEN V_FW_IQ_CMD_FL1DCAEN(1U) #define S_FW_IQ_CMD_FL1DCACPU 10 #define M_FW_IQ_CMD_FL1DCACPU 0x1f #define V_FW_IQ_CMD_FL1DCACPU(x) ((x) << S_FW_IQ_CMD_FL1DCACPU) #define G_FW_IQ_CMD_FL1DCACPU(x) \ (((x) >> S_FW_IQ_CMD_FL1DCACPU) & M_FW_IQ_CMD_FL1DCACPU) #define 
S_FW_IQ_CMD_FL1FBMIN 7 #define M_FW_IQ_CMD_FL1FBMIN 0x7 #define V_FW_IQ_CMD_FL1FBMIN(x) ((x) << S_FW_IQ_CMD_FL1FBMIN) #define G_FW_IQ_CMD_FL1FBMIN(x) \ (((x) >> S_FW_IQ_CMD_FL1FBMIN) & M_FW_IQ_CMD_FL1FBMIN) #define S_FW_IQ_CMD_FL1FBMAX 4 #define M_FW_IQ_CMD_FL1FBMAX 0x7 #define V_FW_IQ_CMD_FL1FBMAX(x) ((x) << S_FW_IQ_CMD_FL1FBMAX) #define G_FW_IQ_CMD_FL1FBMAX(x) \ (((x) >> S_FW_IQ_CMD_FL1FBMAX) & M_FW_IQ_CMD_FL1FBMAX) #define S_FW_IQ_CMD_FL1CIDXFTHRESHO 3 #define M_FW_IQ_CMD_FL1CIDXFTHRESHO 0x1 #define V_FW_IQ_CMD_FL1CIDXFTHRESHO(x) ((x) << S_FW_IQ_CMD_FL1CIDXFTHRESHO) #define G_FW_IQ_CMD_FL1CIDXFTHRESHO(x) \ (((x) >> S_FW_IQ_CMD_FL1CIDXFTHRESHO) & M_FW_IQ_CMD_FL1CIDXFTHRESHO) #define F_FW_IQ_CMD_FL1CIDXFTHRESHO V_FW_IQ_CMD_FL1CIDXFTHRESHO(1U) #define S_FW_IQ_CMD_FL1CIDXFTHRESH 0 #define M_FW_IQ_CMD_FL1CIDXFTHRESH 0x7 #define V_FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << S_FW_IQ_CMD_FL1CIDXFTHRESH) #define G_FW_IQ_CMD_FL1CIDXFTHRESH(x) \ (((x) >> S_FW_IQ_CMD_FL1CIDXFTHRESH) & M_FW_IQ_CMD_FL1CIDXFTHRESH) struct fw_eq_mngt_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be32 cmpliqid_eqid; __be32 physeqid_pkd; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; }; #define S_FW_EQ_MNGT_CMD_PFN 8 #define M_FW_EQ_MNGT_CMD_PFN 0x7 #define V_FW_EQ_MNGT_CMD_PFN(x) ((x) << S_FW_EQ_MNGT_CMD_PFN) #define G_FW_EQ_MNGT_CMD_PFN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_PFN) & M_FW_EQ_MNGT_CMD_PFN) #define S_FW_EQ_MNGT_CMD_VFN 0 #define M_FW_EQ_MNGT_CMD_VFN 0xff #define V_FW_EQ_MNGT_CMD_VFN(x) ((x) << S_FW_EQ_MNGT_CMD_VFN) #define G_FW_EQ_MNGT_CMD_VFN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_VFN) & M_FW_EQ_MNGT_CMD_VFN) #define S_FW_EQ_MNGT_CMD_ALLOC 31 #define M_FW_EQ_MNGT_CMD_ALLOC 0x1 #define V_FW_EQ_MNGT_CMD_ALLOC(x) ((x) << S_FW_EQ_MNGT_CMD_ALLOC) #define G_FW_EQ_MNGT_CMD_ALLOC(x) \ (((x) >> S_FW_EQ_MNGT_CMD_ALLOC) & M_FW_EQ_MNGT_CMD_ALLOC) #define F_FW_EQ_MNGT_CMD_ALLOC V_FW_EQ_MNGT_CMD_ALLOC(1U) #define S_FW_EQ_MNGT_CMD_FREE 30 #define M_FW_EQ_MNGT_CMD_FREE 0x1 #define V_FW_EQ_MNGT_CMD_FREE(x) ((x) << S_FW_EQ_MNGT_CMD_FREE) #define G_FW_EQ_MNGT_CMD_FREE(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FREE) & M_FW_EQ_MNGT_CMD_FREE) #define F_FW_EQ_MNGT_CMD_FREE V_FW_EQ_MNGT_CMD_FREE(1U) #define S_FW_EQ_MNGT_CMD_MODIFY 29 #define M_FW_EQ_MNGT_CMD_MODIFY 0x1 #define V_FW_EQ_MNGT_CMD_MODIFY(x) ((x) << S_FW_EQ_MNGT_CMD_MODIFY) #define G_FW_EQ_MNGT_CMD_MODIFY(x) \ (((x) >> S_FW_EQ_MNGT_CMD_MODIFY) & M_FW_EQ_MNGT_CMD_MODIFY) #define F_FW_EQ_MNGT_CMD_MODIFY V_FW_EQ_MNGT_CMD_MODIFY(1U) #define S_FW_EQ_MNGT_CMD_EQSTART 28 #define M_FW_EQ_MNGT_CMD_EQSTART 0x1 #define V_FW_EQ_MNGT_CMD_EQSTART(x) ((x) << S_FW_EQ_MNGT_CMD_EQSTART) #define G_FW_EQ_MNGT_CMD_EQSTART(x) \ (((x) >> S_FW_EQ_MNGT_CMD_EQSTART) & M_FW_EQ_MNGT_CMD_EQSTART) #define F_FW_EQ_MNGT_CMD_EQSTART V_FW_EQ_MNGT_CMD_EQSTART(1U) #define S_FW_EQ_MNGT_CMD_EQSTOP 27 #define M_FW_EQ_MNGT_CMD_EQSTOP 0x1 #define V_FW_EQ_MNGT_CMD_EQSTOP(x) ((x) << S_FW_EQ_MNGT_CMD_EQSTOP) #define G_FW_EQ_MNGT_CMD_EQSTOP(x) \ (((x) >> S_FW_EQ_MNGT_CMD_EQSTOP) & M_FW_EQ_MNGT_CMD_EQSTOP) #define F_FW_EQ_MNGT_CMD_EQSTOP V_FW_EQ_MNGT_CMD_EQSTOP(1U) #define S_FW_EQ_MNGT_CMD_CMPLIQID 20 #define M_FW_EQ_MNGT_CMD_CMPLIQID 0xfff #define V_FW_EQ_MNGT_CMD_CMPLIQID(x) ((x) << S_FW_EQ_MNGT_CMD_CMPLIQID) #define G_FW_EQ_MNGT_CMD_CMPLIQID(x) \ (((x) >> S_FW_EQ_MNGT_CMD_CMPLIQID) & M_FW_EQ_MNGT_CMD_CMPLIQID) #define S_FW_EQ_MNGT_CMD_EQID 0 #define M_FW_EQ_MNGT_CMD_EQID 0xfffff #define V_FW_EQ_MNGT_CMD_EQID(x) ((x) << S_FW_EQ_MNGT_CMD_EQID) #define G_FW_EQ_MNGT_CMD_EQID(x) \ (((x) >> S_FW_EQ_MNGT_CMD_EQID) & 
M_FW_EQ_MNGT_CMD_EQID) #define S_FW_EQ_MNGT_CMD_PHYSEQID 0 #define M_FW_EQ_MNGT_CMD_PHYSEQID 0xfffff #define V_FW_EQ_MNGT_CMD_PHYSEQID(x) ((x) << S_FW_EQ_MNGT_CMD_PHYSEQID) #define G_FW_EQ_MNGT_CMD_PHYSEQID(x) \ (((x) >> S_FW_EQ_MNGT_CMD_PHYSEQID) & M_FW_EQ_MNGT_CMD_PHYSEQID) #define S_FW_EQ_MNGT_CMD_FETCHSZM 26 #define M_FW_EQ_MNGT_CMD_FETCHSZM 0x1 #define V_FW_EQ_MNGT_CMD_FETCHSZM(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHSZM) #define G_FW_EQ_MNGT_CMD_FETCHSZM(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FETCHSZM) & M_FW_EQ_MNGT_CMD_FETCHSZM) #define F_FW_EQ_MNGT_CMD_FETCHSZM V_FW_EQ_MNGT_CMD_FETCHSZM(1U) #define S_FW_EQ_MNGT_CMD_STATUSPGNS 25 #define M_FW_EQ_MNGT_CMD_STATUSPGNS 0x1 #define V_FW_EQ_MNGT_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_MNGT_CMD_STATUSPGNS) #define G_FW_EQ_MNGT_CMD_STATUSPGNS(x) \ (((x) >> S_FW_EQ_MNGT_CMD_STATUSPGNS) & M_FW_EQ_MNGT_CMD_STATUSPGNS) #define F_FW_EQ_MNGT_CMD_STATUSPGNS V_FW_EQ_MNGT_CMD_STATUSPGNS(1U) #define S_FW_EQ_MNGT_CMD_STATUSPGRO 24 #define M_FW_EQ_MNGT_CMD_STATUSPGRO 0x1 #define V_FW_EQ_MNGT_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_MNGT_CMD_STATUSPGRO) #define G_FW_EQ_MNGT_CMD_STATUSPGRO(x) \ (((x) >> S_FW_EQ_MNGT_CMD_STATUSPGRO) & M_FW_EQ_MNGT_CMD_STATUSPGRO) #define F_FW_EQ_MNGT_CMD_STATUSPGRO V_FW_EQ_MNGT_CMD_STATUSPGRO(1U) #define S_FW_EQ_MNGT_CMD_FETCHNS 23 #define M_FW_EQ_MNGT_CMD_FETCHNS 0x1 #define V_FW_EQ_MNGT_CMD_FETCHNS(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHNS) #define G_FW_EQ_MNGT_CMD_FETCHNS(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FETCHNS) & M_FW_EQ_MNGT_CMD_FETCHNS) #define F_FW_EQ_MNGT_CMD_FETCHNS V_FW_EQ_MNGT_CMD_FETCHNS(1U) #define S_FW_EQ_MNGT_CMD_FETCHRO 22 #define M_FW_EQ_MNGT_CMD_FETCHRO 0x1 #define V_FW_EQ_MNGT_CMD_FETCHRO(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHRO) #define G_FW_EQ_MNGT_CMD_FETCHRO(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FETCHRO) & M_FW_EQ_MNGT_CMD_FETCHRO) #define F_FW_EQ_MNGT_CMD_FETCHRO V_FW_EQ_MNGT_CMD_FETCHRO(1U) #define S_FW_EQ_MNGT_CMD_HOSTFCMODE 20 #define M_FW_EQ_MNGT_CMD_HOSTFCMODE 0x3 #define V_FW_EQ_MNGT_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_MNGT_CMD_HOSTFCMODE) #define G_FW_EQ_MNGT_CMD_HOSTFCMODE(x) \ (((x) >> S_FW_EQ_MNGT_CMD_HOSTFCMODE) & M_FW_EQ_MNGT_CMD_HOSTFCMODE) #define S_FW_EQ_MNGT_CMD_CPRIO 19 #define M_FW_EQ_MNGT_CMD_CPRIO 0x1 #define V_FW_EQ_MNGT_CMD_CPRIO(x) ((x) << S_FW_EQ_MNGT_CMD_CPRIO) #define G_FW_EQ_MNGT_CMD_CPRIO(x) \ (((x) >> S_FW_EQ_MNGT_CMD_CPRIO) & M_FW_EQ_MNGT_CMD_CPRIO) #define F_FW_EQ_MNGT_CMD_CPRIO V_FW_EQ_MNGT_CMD_CPRIO(1U) #define S_FW_EQ_MNGT_CMD_ONCHIP 18 #define M_FW_EQ_MNGT_CMD_ONCHIP 0x1 #define V_FW_EQ_MNGT_CMD_ONCHIP(x) ((x) << S_FW_EQ_MNGT_CMD_ONCHIP) #define G_FW_EQ_MNGT_CMD_ONCHIP(x) \ (((x) >> S_FW_EQ_MNGT_CMD_ONCHIP) & M_FW_EQ_MNGT_CMD_ONCHIP) #define F_FW_EQ_MNGT_CMD_ONCHIP V_FW_EQ_MNGT_CMD_ONCHIP(1U) #define S_FW_EQ_MNGT_CMD_PCIECHN 16 #define M_FW_EQ_MNGT_CMD_PCIECHN 0x3 #define V_FW_EQ_MNGT_CMD_PCIECHN(x) ((x) << S_FW_EQ_MNGT_CMD_PCIECHN) #define G_FW_EQ_MNGT_CMD_PCIECHN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_PCIECHN) & M_FW_EQ_MNGT_CMD_PCIECHN) #define S_FW_EQ_MNGT_CMD_IQID 0 #define M_FW_EQ_MNGT_CMD_IQID 0xffff #define V_FW_EQ_MNGT_CMD_IQID(x) ((x) << S_FW_EQ_MNGT_CMD_IQID) #define G_FW_EQ_MNGT_CMD_IQID(x) \ (((x) >> S_FW_EQ_MNGT_CMD_IQID) & M_FW_EQ_MNGT_CMD_IQID) #define S_FW_EQ_MNGT_CMD_DCAEN 31 #define M_FW_EQ_MNGT_CMD_DCAEN 0x1 #define V_FW_EQ_MNGT_CMD_DCAEN(x) ((x) << S_FW_EQ_MNGT_CMD_DCAEN) #define G_FW_EQ_MNGT_CMD_DCAEN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_DCAEN) & M_FW_EQ_MNGT_CMD_DCAEN) #define F_FW_EQ_MNGT_CMD_DCAEN V_FW_EQ_MNGT_CMD_DCAEN(1U) #define S_FW_EQ_MNGT_CMD_DCACPU 26 #define M_FW_EQ_MNGT_CMD_DCACPU 0x1f 
#define V_FW_EQ_MNGT_CMD_DCACPU(x) ((x) << S_FW_EQ_MNGT_CMD_DCACPU) #define G_FW_EQ_MNGT_CMD_DCACPU(x) \ (((x) >> S_FW_EQ_MNGT_CMD_DCACPU) & M_FW_EQ_MNGT_CMD_DCACPU) #define S_FW_EQ_MNGT_CMD_FBMIN 23 #define M_FW_EQ_MNGT_CMD_FBMIN 0x7 #define V_FW_EQ_MNGT_CMD_FBMIN(x) ((x) << S_FW_EQ_MNGT_CMD_FBMIN) #define G_FW_EQ_MNGT_CMD_FBMIN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FBMIN) & M_FW_EQ_MNGT_CMD_FBMIN) #define S_FW_EQ_MNGT_CMD_FBMAX 20 #define M_FW_EQ_MNGT_CMD_FBMAX 0x7 #define V_FW_EQ_MNGT_CMD_FBMAX(x) ((x) << S_FW_EQ_MNGT_CMD_FBMAX) #define G_FW_EQ_MNGT_CMD_FBMAX(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FBMAX) & M_FW_EQ_MNGT_CMD_FBMAX) #define S_FW_EQ_MNGT_CMD_CIDXFTHRESHO 19 #define M_FW_EQ_MNGT_CMD_CIDXFTHRESHO 0x1 #define V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x) \ ((x) << S_FW_EQ_MNGT_CMD_CIDXFTHRESHO) #define G_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x) \ (((x) >> S_FW_EQ_MNGT_CMD_CIDXFTHRESHO) & M_FW_EQ_MNGT_CMD_CIDXFTHRESHO) #define F_FW_EQ_MNGT_CMD_CIDXFTHRESHO V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(1U) #define S_FW_EQ_MNGT_CMD_CIDXFTHRESH 16 #define M_FW_EQ_MNGT_CMD_CIDXFTHRESH 0x7 #define V_FW_EQ_MNGT_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_MNGT_CMD_CIDXFTHRESH) #define G_FW_EQ_MNGT_CMD_CIDXFTHRESH(x) \ (((x) >> S_FW_EQ_MNGT_CMD_CIDXFTHRESH) & M_FW_EQ_MNGT_CMD_CIDXFTHRESH) #define S_FW_EQ_MNGT_CMD_EQSIZE 0 #define M_FW_EQ_MNGT_CMD_EQSIZE 0xffff #define V_FW_EQ_MNGT_CMD_EQSIZE(x) ((x) << S_FW_EQ_MNGT_CMD_EQSIZE) #define G_FW_EQ_MNGT_CMD_EQSIZE(x) \ (((x) >> S_FW_EQ_MNGT_CMD_EQSIZE) & M_FW_EQ_MNGT_CMD_EQSIZE) struct fw_eq_eth_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be32 eqid_pkd; __be32 physeqid_pkd; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; __be32 autoequiqe_to_viid; __be32 r8_lo; __be64 r9; }; #define S_FW_EQ_ETH_CMD_PFN 8 #define M_FW_EQ_ETH_CMD_PFN 0x7 #define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN) #define G_FW_EQ_ETH_CMD_PFN(x) \ (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN) #define S_FW_EQ_ETH_CMD_VFN 0 #define M_FW_EQ_ETH_CMD_VFN 0xff #define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN) #define G_FW_EQ_ETH_CMD_VFN(x) \ (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN) #define S_FW_EQ_ETH_CMD_ALLOC 31 #define M_FW_EQ_ETH_CMD_ALLOC 0x1 #define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC) #define G_FW_EQ_ETH_CMD_ALLOC(x) \ (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC) #define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U) #define S_FW_EQ_ETH_CMD_FREE 30 #define M_FW_EQ_ETH_CMD_FREE 0x1 #define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE) #define G_FW_EQ_ETH_CMD_FREE(x) \ (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE) #define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U) #define S_FW_EQ_ETH_CMD_MODIFY 29 #define M_FW_EQ_ETH_CMD_MODIFY 0x1 #define V_FW_EQ_ETH_CMD_MODIFY(x) ((x) << S_FW_EQ_ETH_CMD_MODIFY) #define G_FW_EQ_ETH_CMD_MODIFY(x) \ (((x) >> S_FW_EQ_ETH_CMD_MODIFY) & M_FW_EQ_ETH_CMD_MODIFY) #define F_FW_EQ_ETH_CMD_MODIFY V_FW_EQ_ETH_CMD_MODIFY(1U) #define S_FW_EQ_ETH_CMD_EQSTART 28 #define M_FW_EQ_ETH_CMD_EQSTART 0x1 #define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART) #define G_FW_EQ_ETH_CMD_EQSTART(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART) #define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U) #define S_FW_EQ_ETH_CMD_EQSTOP 27 #define M_FW_EQ_ETH_CMD_EQSTOP 0x1 #define V_FW_EQ_ETH_CMD_EQSTOP(x) ((x) << S_FW_EQ_ETH_CMD_EQSTOP) #define G_FW_EQ_ETH_CMD_EQSTOP(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQSTOP) & M_FW_EQ_ETH_CMD_EQSTOP) #define F_FW_EQ_ETH_CMD_EQSTOP V_FW_EQ_ETH_CMD_EQSTOP(1U) 
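Illustrative sketch (not part of the firmware interface): every field in this file follows the same S_/M_/V_/G_/F_ pattern -- S_* is the field's bit offset within its word, M_* is the mask after shifting down, V_*(x) shifts a value into position, G_*(x) extracts it, and F_* is the single-bit form V_*(1U). For example, the PF/VF fields of fw_eq_eth_cmd.op_to_vfn compose and decode as below; the opcode/request bits added by the generic FW_CMD macros, and the htobe32() conversion used when storing into the __be32 struct member, are omitted here.
static inline uint32_t
example_eq_eth_pf_vf(u_int pf, u_int vf)	/* hypothetical helper */
{
	/* V_* only shifts; mask with M_* so an out-of-range input
	 * cannot spill into a neighbouring field. */
	return (V_FW_EQ_ETH_CMD_PFN(pf & M_FW_EQ_ETH_CMD_PFN) |
	    V_FW_EQ_ETH_CMD_VFN(vf & M_FW_EQ_ETH_CMD_VFN));
}
G_FW_EQ_ETH_CMD_PFN() and G_FW_EQ_ETH_CMD_VFN() recover the two values from the composed word.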
#define S_FW_EQ_ETH_CMD_EQID 0 #define M_FW_EQ_ETH_CMD_EQID 0xfffff #define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID) #define G_FW_EQ_ETH_CMD_EQID(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID) #define S_FW_EQ_ETH_CMD_PHYSEQID 0 #define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff #define V_FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << S_FW_EQ_ETH_CMD_PHYSEQID) #define G_FW_EQ_ETH_CMD_PHYSEQID(x) \ (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID) #define S_FW_EQ_ETH_CMD_FETCHSZM 26 #define M_FW_EQ_ETH_CMD_FETCHSZM 0x1 #define V_FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << S_FW_EQ_ETH_CMD_FETCHSZM) #define G_FW_EQ_ETH_CMD_FETCHSZM(x) \ (((x) >> S_FW_EQ_ETH_CMD_FETCHSZM) & M_FW_EQ_ETH_CMD_FETCHSZM) #define F_FW_EQ_ETH_CMD_FETCHSZM V_FW_EQ_ETH_CMD_FETCHSZM(1U) #define S_FW_EQ_ETH_CMD_STATUSPGNS 25 #define M_FW_EQ_ETH_CMD_STATUSPGNS 0x1 #define V_FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_ETH_CMD_STATUSPGNS) #define G_FW_EQ_ETH_CMD_STATUSPGNS(x) \ (((x) >> S_FW_EQ_ETH_CMD_STATUSPGNS) & M_FW_EQ_ETH_CMD_STATUSPGNS) #define F_FW_EQ_ETH_CMD_STATUSPGNS V_FW_EQ_ETH_CMD_STATUSPGNS(1U) #define S_FW_EQ_ETH_CMD_STATUSPGRO 24 #define M_FW_EQ_ETH_CMD_STATUSPGRO 0x1 #define V_FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_ETH_CMD_STATUSPGRO) #define G_FW_EQ_ETH_CMD_STATUSPGRO(x) \ (((x) >> S_FW_EQ_ETH_CMD_STATUSPGRO) & M_FW_EQ_ETH_CMD_STATUSPGRO) #define F_FW_EQ_ETH_CMD_STATUSPGRO V_FW_EQ_ETH_CMD_STATUSPGRO(1U) #define S_FW_EQ_ETH_CMD_FETCHNS 23 #define M_FW_EQ_ETH_CMD_FETCHNS 0x1 #define V_FW_EQ_ETH_CMD_FETCHNS(x) ((x) << S_FW_EQ_ETH_CMD_FETCHNS) #define G_FW_EQ_ETH_CMD_FETCHNS(x) \ (((x) >> S_FW_EQ_ETH_CMD_FETCHNS) & M_FW_EQ_ETH_CMD_FETCHNS) #define F_FW_EQ_ETH_CMD_FETCHNS V_FW_EQ_ETH_CMD_FETCHNS(1U) #define S_FW_EQ_ETH_CMD_FETCHRO 22 #define M_FW_EQ_ETH_CMD_FETCHRO 0x1 #define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO) #define G_FW_EQ_ETH_CMD_FETCHRO(x) \ (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO) #define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U) #define S_FW_EQ_ETH_CMD_HOSTFCMODE 20 #define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3 #define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE) #define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \ (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE) #define S_FW_EQ_ETH_CMD_CPRIO 19 #define M_FW_EQ_ETH_CMD_CPRIO 0x1 #define V_FW_EQ_ETH_CMD_CPRIO(x) ((x) << S_FW_EQ_ETH_CMD_CPRIO) #define G_FW_EQ_ETH_CMD_CPRIO(x) \ (((x) >> S_FW_EQ_ETH_CMD_CPRIO) & M_FW_EQ_ETH_CMD_CPRIO) #define F_FW_EQ_ETH_CMD_CPRIO V_FW_EQ_ETH_CMD_CPRIO(1U) #define S_FW_EQ_ETH_CMD_ONCHIP 18 #define M_FW_EQ_ETH_CMD_ONCHIP 0x1 #define V_FW_EQ_ETH_CMD_ONCHIP(x) ((x) << S_FW_EQ_ETH_CMD_ONCHIP) #define G_FW_EQ_ETH_CMD_ONCHIP(x) \ (((x) >> S_FW_EQ_ETH_CMD_ONCHIP) & M_FW_EQ_ETH_CMD_ONCHIP) #define F_FW_EQ_ETH_CMD_ONCHIP V_FW_EQ_ETH_CMD_ONCHIP(1U) #define S_FW_EQ_ETH_CMD_PCIECHN 16 #define M_FW_EQ_ETH_CMD_PCIECHN 0x3 #define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN) #define G_FW_EQ_ETH_CMD_PCIECHN(x) \ (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN) #define S_FW_EQ_ETH_CMD_IQID 0 #define M_FW_EQ_ETH_CMD_IQID 0xffff #define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID) #define G_FW_EQ_ETH_CMD_IQID(x) \ (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID) #define S_FW_EQ_ETH_CMD_DCAEN 31 #define M_FW_EQ_ETH_CMD_DCAEN 0x1 #define V_FW_EQ_ETH_CMD_DCAEN(x) ((x) << S_FW_EQ_ETH_CMD_DCAEN) #define G_FW_EQ_ETH_CMD_DCAEN(x) \ (((x) >> S_FW_EQ_ETH_CMD_DCAEN) & M_FW_EQ_ETH_CMD_DCAEN) #define F_FW_EQ_ETH_CMD_DCAEN 
V_FW_EQ_ETH_CMD_DCAEN(1U) #define S_FW_EQ_ETH_CMD_DCACPU 26 #define M_FW_EQ_ETH_CMD_DCACPU 0x1f #define V_FW_EQ_ETH_CMD_DCACPU(x) ((x) << S_FW_EQ_ETH_CMD_DCACPU) #define G_FW_EQ_ETH_CMD_DCACPU(x) \ (((x) >> S_FW_EQ_ETH_CMD_DCACPU) & M_FW_EQ_ETH_CMD_DCACPU) #define S_FW_EQ_ETH_CMD_FBMIN 23 #define M_FW_EQ_ETH_CMD_FBMIN 0x7 #define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN) #define G_FW_EQ_ETH_CMD_FBMIN(x) \ (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN) #define S_FW_EQ_ETH_CMD_FBMAX 20 #define M_FW_EQ_ETH_CMD_FBMAX 0x7 #define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX) #define G_FW_EQ_ETH_CMD_FBMAX(x) \ (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX) #define S_FW_EQ_ETH_CMD_CIDXFTHRESHO 19 #define M_FW_EQ_ETH_CMD_CIDXFTHRESHO 0x1 #define V_FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESHO) #define G_FW_EQ_ETH_CMD_CIDXFTHRESHO(x) \ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESHO) & M_FW_EQ_ETH_CMD_CIDXFTHRESHO) #define F_FW_EQ_ETH_CMD_CIDXFTHRESHO V_FW_EQ_ETH_CMD_CIDXFTHRESHO(1U) #define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16 #define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7 #define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH) #define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH) #define S_FW_EQ_ETH_CMD_EQSIZE 0 #define M_FW_EQ_ETH_CMD_EQSIZE 0xffff #define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE) #define G_FW_EQ_ETH_CMD_EQSIZE(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE) #define S_FW_EQ_ETH_CMD_AUTOEQUIQE 31 #define M_FW_EQ_ETH_CMD_AUTOEQUIQE 0x1 #define V_FW_EQ_ETH_CMD_AUTOEQUIQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUIQE) #define G_FW_EQ_ETH_CMD_AUTOEQUIQE(x) \ (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUIQE) & M_FW_EQ_ETH_CMD_AUTOEQUIQE) #define F_FW_EQ_ETH_CMD_AUTOEQUIQE V_FW_EQ_ETH_CMD_AUTOEQUIQE(1U) #define S_FW_EQ_ETH_CMD_AUTOEQUEQE 30 #define M_FW_EQ_ETH_CMD_AUTOEQUEQE 0x1 #define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE) #define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x) \ (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE) #define F_FW_EQ_ETH_CMD_AUTOEQUEQE V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U) #define S_FW_EQ_ETH_CMD_VIID 16 #define M_FW_EQ_ETH_CMD_VIID 0xfff #define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID) #define G_FW_EQ_ETH_CMD_VIID(x) \ (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID) struct fw_eq_ctrl_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be32 cmpliqid_eqid; __be32 physeqid_pkd; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; }; #define S_FW_EQ_CTRL_CMD_PFN 8 #define M_FW_EQ_CTRL_CMD_PFN 0x7 #define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN) #define G_FW_EQ_CTRL_CMD_PFN(x) \ (((x) >> S_FW_EQ_CTRL_CMD_PFN) & M_FW_EQ_CTRL_CMD_PFN) #define S_FW_EQ_CTRL_CMD_VFN 0 #define M_FW_EQ_CTRL_CMD_VFN 0xff #define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN) #define G_FW_EQ_CTRL_CMD_VFN(x) \ (((x) >> S_FW_EQ_CTRL_CMD_VFN) & M_FW_EQ_CTRL_CMD_VFN) #define S_FW_EQ_CTRL_CMD_ALLOC 31 #define M_FW_EQ_CTRL_CMD_ALLOC 0x1 #define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC) #define G_FW_EQ_CTRL_CMD_ALLOC(x) \ (((x) >> S_FW_EQ_CTRL_CMD_ALLOC) & M_FW_EQ_CTRL_CMD_ALLOC) #define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U) #define S_FW_EQ_CTRL_CMD_FREE 30 #define M_FW_EQ_CTRL_CMD_FREE 0x1 #define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE) #define G_FW_EQ_CTRL_CMD_FREE(x) \ (((x) >> S_FW_EQ_CTRL_CMD_FREE) & M_FW_EQ_CTRL_CMD_FREE) #define F_FW_EQ_CTRL_CMD_FREE 
V_FW_EQ_CTRL_CMD_FREE(1U) #define S_FW_EQ_CTRL_CMD_MODIFY 29 #define M_FW_EQ_CTRL_CMD_MODIFY 0x1 #define V_FW_EQ_CTRL_CMD_MODIFY(x) ((x) << S_FW_EQ_CTRL_CMD_MODIFY) #define G_FW_EQ_CTRL_CMD_MODIFY(x) \ (((x) >> S_FW_EQ_CTRL_CMD_MODIFY) & M_FW_EQ_CTRL_CMD_MODIFY) #define F_FW_EQ_CTRL_CMD_MODIFY V_FW_EQ_CTRL_CMD_MODIFY(1U) #define S_FW_EQ_CTRL_CMD_EQSTART 28 #define M_FW_EQ_CTRL_CMD_EQSTART 0x1 #define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART) #define G_FW_EQ_CTRL_CMD_EQSTART(x) \ (((x) >> S_FW_EQ_CTRL_CMD_EQSTART) & M_FW_EQ_CTRL_CMD_EQSTART) #define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U) #define S_FW_EQ_CTRL_CMD_EQSTOP 27 #define M_FW_EQ_CTRL_CMD_EQSTOP 0x1 #define V_FW_EQ_CTRL_CMD_EQSTOP(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTOP) #define G_FW_EQ_CTRL_CMD_EQSTOP(x) \ (((x) >> S_FW_EQ_CTRL_CMD_EQSTOP) & M_FW_EQ_CTRL_CMD_EQSTOP) #define F_FW_EQ_CTRL_CMD_EQSTOP V_FW_EQ_CTRL_CMD_EQSTOP(1U) #define S_FW_EQ_CTRL_CMD_CMPLIQID 20 #define M_FW_EQ_CTRL_CMD_CMPLIQID 0xfff #define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID) #define G_FW_EQ_CTRL_CMD_CMPLIQID(x) \ (((x) >> S_FW_EQ_CTRL_CMD_CMPLIQID) & M_FW_EQ_CTRL_CMD_CMPLIQID) #define S_FW_EQ_CTRL_CMD_EQID 0 #define M_FW_EQ_CTRL_CMD_EQID 0xfffff #define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID) #define G_FW_EQ_CTRL_CMD_EQID(x) \ (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID) #define S_FW_EQ_CTRL_CMD_PHYSEQID 0 #define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff #define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID) #define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \ (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID) #define S_FW_EQ_CTRL_CMD_FETCHSZM 26 #define M_FW_EQ_CTRL_CMD_FETCHSZM 0x1 #define V_FW_EQ_CTRL_CMD_FETCHSZM(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHSZM) #define G_FW_EQ_CTRL_CMD_FETCHSZM(x) \ (((x) >> S_FW_EQ_CTRL_CMD_FETCHSZM) & M_FW_EQ_CTRL_CMD_FETCHSZM) #define F_FW_EQ_CTRL_CMD_FETCHSZM V_FW_EQ_CTRL_CMD_FETCHSZM(1U) #define S_FW_EQ_CTRL_CMD_STATUSPGNS 25 #define M_FW_EQ_CTRL_CMD_STATUSPGNS 0x1 #define V_FW_EQ_CTRL_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_CTRL_CMD_STATUSPGNS) #define G_FW_EQ_CTRL_CMD_STATUSPGNS(x) \ (((x) >> S_FW_EQ_CTRL_CMD_STATUSPGNS) & M_FW_EQ_CTRL_CMD_STATUSPGNS) #define F_FW_EQ_CTRL_CMD_STATUSPGNS V_FW_EQ_CTRL_CMD_STATUSPGNS(1U) #define S_FW_EQ_CTRL_CMD_STATUSPGRO 24 #define M_FW_EQ_CTRL_CMD_STATUSPGRO 0x1 #define V_FW_EQ_CTRL_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_CTRL_CMD_STATUSPGRO) #define G_FW_EQ_CTRL_CMD_STATUSPGRO(x) \ (((x) >> S_FW_EQ_CTRL_CMD_STATUSPGRO) & M_FW_EQ_CTRL_CMD_STATUSPGRO) #define F_FW_EQ_CTRL_CMD_STATUSPGRO V_FW_EQ_CTRL_CMD_STATUSPGRO(1U) #define S_FW_EQ_CTRL_CMD_FETCHNS 23 #define M_FW_EQ_CTRL_CMD_FETCHNS 0x1 #define V_FW_EQ_CTRL_CMD_FETCHNS(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHNS) #define G_FW_EQ_CTRL_CMD_FETCHNS(x) \ (((x) >> S_FW_EQ_CTRL_CMD_FETCHNS) & M_FW_EQ_CTRL_CMD_FETCHNS) #define F_FW_EQ_CTRL_CMD_FETCHNS V_FW_EQ_CTRL_CMD_FETCHNS(1U) #define S_FW_EQ_CTRL_CMD_FETCHRO 22 #define M_FW_EQ_CTRL_CMD_FETCHRO 0x1 #define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO) #define G_FW_EQ_CTRL_CMD_FETCHRO(x) \ (((x) >> S_FW_EQ_CTRL_CMD_FETCHRO) & M_FW_EQ_CTRL_CMD_FETCHRO) #define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U) #define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20 #define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3 #define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE) #define G_FW_EQ_CTRL_CMD_HOSTFCMODE(x) \ (((x) >> S_FW_EQ_CTRL_CMD_HOSTFCMODE) & M_FW_EQ_CTRL_CMD_HOSTFCMODE) #define S_FW_EQ_CTRL_CMD_CPRIO 19 
#define M_FW_EQ_CTRL_CMD_CPRIO 0x1 #define V_FW_EQ_CTRL_CMD_CPRIO(x) ((x) << S_FW_EQ_CTRL_CMD_CPRIO) #define G_FW_EQ_CTRL_CMD_CPRIO(x) \ (((x) >> S_FW_EQ_CTRL_CMD_CPRIO) & M_FW_EQ_CTRL_CMD_CPRIO) #define F_FW_EQ_CTRL_CMD_CPRIO V_FW_EQ_CTRL_CMD_CPRIO(1U) #define S_FW_EQ_CTRL_CMD_ONCHIP 18 #define M_FW_EQ_CTRL_CMD_ONCHIP 0x1 #define V_FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << S_FW_EQ_CTRL_CMD_ONCHIP) #define G_FW_EQ_CTRL_CMD_ONCHIP(x) \ (((x) >> S_FW_EQ_CTRL_CMD_ONCHIP) & M_FW_EQ_CTRL_CMD_ONCHIP) #define F_FW_EQ_CTRL_CMD_ONCHIP V_FW_EQ_CTRL_CMD_ONCHIP(1U) #define S_FW_EQ_CTRL_CMD_PCIECHN 16 #define M_FW_EQ_CTRL_CMD_PCIECHN 0x3 #define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN) #define G_FW_EQ_CTRL_CMD_PCIECHN(x) \ (((x) >> S_FW_EQ_CTRL_CMD_PCIECHN) & M_FW_EQ_CTRL_CMD_PCIECHN) #define S_FW_EQ_CTRL_CMD_IQID 0 #define M_FW_EQ_CTRL_CMD_IQID 0xffff #define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID) #define G_FW_EQ_CTRL_CMD_IQID(x) \ (((x) >> S_FW_EQ_CTRL_CMD_IQID) & M_FW_EQ_CTRL_CMD_IQID) #define S_FW_EQ_CTRL_CMD_DCAEN 31 #define M_FW_EQ_CTRL_CMD_DCAEN 0x1 #define V_FW_EQ_CTRL_CMD_DCAEN(x) ((x) << S_FW_EQ_CTRL_CMD_DCAEN) #define G_FW_EQ_CTRL_CMD_DCAEN(x) \ (((x) >> S_FW_EQ_CTRL_CMD_DCAEN) & M_FW_EQ_CTRL_CMD_DCAEN) #define F_FW_EQ_CTRL_CMD_DCAEN V_FW_EQ_CTRL_CMD_DCAEN(1U) #define S_FW_EQ_CTRL_CMD_DCACPU 26 #define M_FW_EQ_CTRL_CMD_DCACPU 0x1f #define V_FW_EQ_CTRL_CMD_DCACPU(x) ((x) << S_FW_EQ_CTRL_CMD_DCACPU) #define G_FW_EQ_CTRL_CMD_DCACPU(x) \ (((x) >> S_FW_EQ_CTRL_CMD_DCACPU) & M_FW_EQ_CTRL_CMD_DCACPU) #define S_FW_EQ_CTRL_CMD_FBMIN 23 #define M_FW_EQ_CTRL_CMD_FBMIN 0x7 #define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN) #define G_FW_EQ_CTRL_CMD_FBMIN(x) \ (((x) >> S_FW_EQ_CTRL_CMD_FBMIN) & M_FW_EQ_CTRL_CMD_FBMIN) #define S_FW_EQ_CTRL_CMD_FBMAX 20 #define M_FW_EQ_CTRL_CMD_FBMAX 0x7 #define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX) #define G_FW_EQ_CTRL_CMD_FBMAX(x) \ (((x) >> S_FW_EQ_CTRL_CMD_FBMAX) & M_FW_EQ_CTRL_CMD_FBMAX) #define S_FW_EQ_CTRL_CMD_CIDXFTHRESHO 19 #define M_FW_EQ_CTRL_CMD_CIDXFTHRESHO 0x1 #define V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) \ ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESHO) #define G_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) \ (((x) >> S_FW_EQ_CTRL_CMD_CIDXFTHRESHO) & M_FW_EQ_CTRL_CMD_CIDXFTHRESHO) #define F_FW_EQ_CTRL_CMD_CIDXFTHRESHO V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(1U) #define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16 #define M_FW_EQ_CTRL_CMD_CIDXFTHRESH 0x7 #define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH) #define G_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) \ (((x) >> S_FW_EQ_CTRL_CMD_CIDXFTHRESH) & M_FW_EQ_CTRL_CMD_CIDXFTHRESH) #define S_FW_EQ_CTRL_CMD_EQSIZE 0 #define M_FW_EQ_CTRL_CMD_EQSIZE 0xffff #define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE) #define G_FW_EQ_CTRL_CMD_EQSIZE(x) \ (((x) >> S_FW_EQ_CTRL_CMD_EQSIZE) & M_FW_EQ_CTRL_CMD_EQSIZE) struct fw_eq_ofld_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be32 eqid_pkd; __be32 physeqid_pkd; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; }; #define S_FW_EQ_OFLD_CMD_PFN 8 #define M_FW_EQ_OFLD_CMD_PFN 0x7 #define V_FW_EQ_OFLD_CMD_PFN(x) ((x) << S_FW_EQ_OFLD_CMD_PFN) #define G_FW_EQ_OFLD_CMD_PFN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_PFN) & M_FW_EQ_OFLD_CMD_PFN) #define S_FW_EQ_OFLD_CMD_VFN 0 #define M_FW_EQ_OFLD_CMD_VFN 0xff #define V_FW_EQ_OFLD_CMD_VFN(x) ((x) << S_FW_EQ_OFLD_CMD_VFN) #define G_FW_EQ_OFLD_CMD_VFN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_VFN) & M_FW_EQ_OFLD_CMD_VFN) #define S_FW_EQ_OFLD_CMD_ALLOC 31 #define M_FW_EQ_OFLD_CMD_ALLOC 0x1 #define 
V_FW_EQ_OFLD_CMD_ALLOC(x) ((x) << S_FW_EQ_OFLD_CMD_ALLOC) #define G_FW_EQ_OFLD_CMD_ALLOC(x) \ (((x) >> S_FW_EQ_OFLD_CMD_ALLOC) & M_FW_EQ_OFLD_CMD_ALLOC) #define F_FW_EQ_OFLD_CMD_ALLOC V_FW_EQ_OFLD_CMD_ALLOC(1U) #define S_FW_EQ_OFLD_CMD_FREE 30 #define M_FW_EQ_OFLD_CMD_FREE 0x1 #define V_FW_EQ_OFLD_CMD_FREE(x) ((x) << S_FW_EQ_OFLD_CMD_FREE) #define G_FW_EQ_OFLD_CMD_FREE(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FREE) & M_FW_EQ_OFLD_CMD_FREE) #define F_FW_EQ_OFLD_CMD_FREE V_FW_EQ_OFLD_CMD_FREE(1U) #define S_FW_EQ_OFLD_CMD_MODIFY 29 #define M_FW_EQ_OFLD_CMD_MODIFY 0x1 #define V_FW_EQ_OFLD_CMD_MODIFY(x) ((x) << S_FW_EQ_OFLD_CMD_MODIFY) #define G_FW_EQ_OFLD_CMD_MODIFY(x) \ (((x) >> S_FW_EQ_OFLD_CMD_MODIFY) & M_FW_EQ_OFLD_CMD_MODIFY) #define F_FW_EQ_OFLD_CMD_MODIFY V_FW_EQ_OFLD_CMD_MODIFY(1U) #define S_FW_EQ_OFLD_CMD_EQSTART 28 #define M_FW_EQ_OFLD_CMD_EQSTART 0x1 #define V_FW_EQ_OFLD_CMD_EQSTART(x) ((x) << S_FW_EQ_OFLD_CMD_EQSTART) #define G_FW_EQ_OFLD_CMD_EQSTART(x) \ (((x) >> S_FW_EQ_OFLD_CMD_EQSTART) & M_FW_EQ_OFLD_CMD_EQSTART) #define F_FW_EQ_OFLD_CMD_EQSTART V_FW_EQ_OFLD_CMD_EQSTART(1U) #define S_FW_EQ_OFLD_CMD_EQSTOP 27 #define M_FW_EQ_OFLD_CMD_EQSTOP 0x1 #define V_FW_EQ_OFLD_CMD_EQSTOP(x) ((x) << S_FW_EQ_OFLD_CMD_EQSTOP) #define G_FW_EQ_OFLD_CMD_EQSTOP(x) \ (((x) >> S_FW_EQ_OFLD_CMD_EQSTOP) & M_FW_EQ_OFLD_CMD_EQSTOP) #define F_FW_EQ_OFLD_CMD_EQSTOP V_FW_EQ_OFLD_CMD_EQSTOP(1U) #define S_FW_EQ_OFLD_CMD_EQID 0 #define M_FW_EQ_OFLD_CMD_EQID 0xfffff #define V_FW_EQ_OFLD_CMD_EQID(x) ((x) << S_FW_EQ_OFLD_CMD_EQID) #define G_FW_EQ_OFLD_CMD_EQID(x) \ (((x) >> S_FW_EQ_OFLD_CMD_EQID) & M_FW_EQ_OFLD_CMD_EQID) #define S_FW_EQ_OFLD_CMD_PHYSEQID 0 #define M_FW_EQ_OFLD_CMD_PHYSEQID 0xfffff #define V_FW_EQ_OFLD_CMD_PHYSEQID(x) ((x) << S_FW_EQ_OFLD_CMD_PHYSEQID) #define G_FW_EQ_OFLD_CMD_PHYSEQID(x) \ (((x) >> S_FW_EQ_OFLD_CMD_PHYSEQID) & M_FW_EQ_OFLD_CMD_PHYSEQID) #define S_FW_EQ_OFLD_CMD_FETCHSZM 26 #define M_FW_EQ_OFLD_CMD_FETCHSZM 0x1 #define V_FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHSZM) #define G_FW_EQ_OFLD_CMD_FETCHSZM(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FETCHSZM) & M_FW_EQ_OFLD_CMD_FETCHSZM) #define F_FW_EQ_OFLD_CMD_FETCHSZM V_FW_EQ_OFLD_CMD_FETCHSZM(1U) #define S_FW_EQ_OFLD_CMD_STATUSPGNS 25 #define M_FW_EQ_OFLD_CMD_STATUSPGNS 0x1 #define V_FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_OFLD_CMD_STATUSPGNS) #define G_FW_EQ_OFLD_CMD_STATUSPGNS(x) \ (((x) >> S_FW_EQ_OFLD_CMD_STATUSPGNS) & M_FW_EQ_OFLD_CMD_STATUSPGNS) #define F_FW_EQ_OFLD_CMD_STATUSPGNS V_FW_EQ_OFLD_CMD_STATUSPGNS(1U) #define S_FW_EQ_OFLD_CMD_STATUSPGRO 24 #define M_FW_EQ_OFLD_CMD_STATUSPGRO 0x1 #define V_FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_OFLD_CMD_STATUSPGRO) #define G_FW_EQ_OFLD_CMD_STATUSPGRO(x) \ (((x) >> S_FW_EQ_OFLD_CMD_STATUSPGRO) & M_FW_EQ_OFLD_CMD_STATUSPGRO) #define F_FW_EQ_OFLD_CMD_STATUSPGRO V_FW_EQ_OFLD_CMD_STATUSPGRO(1U) #define S_FW_EQ_OFLD_CMD_FETCHNS 23 #define M_FW_EQ_OFLD_CMD_FETCHNS 0x1 #define V_FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHNS) #define G_FW_EQ_OFLD_CMD_FETCHNS(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FETCHNS) & M_FW_EQ_OFLD_CMD_FETCHNS) #define F_FW_EQ_OFLD_CMD_FETCHNS V_FW_EQ_OFLD_CMD_FETCHNS(1U) #define S_FW_EQ_OFLD_CMD_FETCHRO 22 #define M_FW_EQ_OFLD_CMD_FETCHRO 0x1 #define V_FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHRO) #define G_FW_EQ_OFLD_CMD_FETCHRO(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FETCHRO) & M_FW_EQ_OFLD_CMD_FETCHRO) #define F_FW_EQ_OFLD_CMD_FETCHRO V_FW_EQ_OFLD_CMD_FETCHRO(1U) #define S_FW_EQ_OFLD_CMD_HOSTFCMODE 20 #define M_FW_EQ_OFLD_CMD_HOSTFCMODE 0x3 #define 
V_FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_OFLD_CMD_HOSTFCMODE) #define G_FW_EQ_OFLD_CMD_HOSTFCMODE(x) \ (((x) >> S_FW_EQ_OFLD_CMD_HOSTFCMODE) & M_FW_EQ_OFLD_CMD_HOSTFCMODE) #define S_FW_EQ_OFLD_CMD_CPRIO 19 #define M_FW_EQ_OFLD_CMD_CPRIO 0x1 #define V_FW_EQ_OFLD_CMD_CPRIO(x) ((x) << S_FW_EQ_OFLD_CMD_CPRIO) #define G_FW_EQ_OFLD_CMD_CPRIO(x) \ (((x) >> S_FW_EQ_OFLD_CMD_CPRIO) & M_FW_EQ_OFLD_CMD_CPRIO) #define F_FW_EQ_OFLD_CMD_CPRIO V_FW_EQ_OFLD_CMD_CPRIO(1U) #define S_FW_EQ_OFLD_CMD_ONCHIP 18 #define M_FW_EQ_OFLD_CMD_ONCHIP 0x1 #define V_FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << S_FW_EQ_OFLD_CMD_ONCHIP) #define G_FW_EQ_OFLD_CMD_ONCHIP(x) \ (((x) >> S_FW_EQ_OFLD_CMD_ONCHIP) & M_FW_EQ_OFLD_CMD_ONCHIP) #define F_FW_EQ_OFLD_CMD_ONCHIP V_FW_EQ_OFLD_CMD_ONCHIP(1U) #define S_FW_EQ_OFLD_CMD_PCIECHN 16 #define M_FW_EQ_OFLD_CMD_PCIECHN 0x3 #define V_FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << S_FW_EQ_OFLD_CMD_PCIECHN) #define G_FW_EQ_OFLD_CMD_PCIECHN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_PCIECHN) & M_FW_EQ_OFLD_CMD_PCIECHN) #define S_FW_EQ_OFLD_CMD_IQID 0 #define M_FW_EQ_OFLD_CMD_IQID 0xffff #define V_FW_EQ_OFLD_CMD_IQID(x) ((x) << S_FW_EQ_OFLD_CMD_IQID) #define G_FW_EQ_OFLD_CMD_IQID(x) \ (((x) >> S_FW_EQ_OFLD_CMD_IQID) & M_FW_EQ_OFLD_CMD_IQID) #define S_FW_EQ_OFLD_CMD_DCAEN 31 #define M_FW_EQ_OFLD_CMD_DCAEN 0x1 #define V_FW_EQ_OFLD_CMD_DCAEN(x) ((x) << S_FW_EQ_OFLD_CMD_DCAEN) #define G_FW_EQ_OFLD_CMD_DCAEN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_DCAEN) & M_FW_EQ_OFLD_CMD_DCAEN) #define F_FW_EQ_OFLD_CMD_DCAEN V_FW_EQ_OFLD_CMD_DCAEN(1U) #define S_FW_EQ_OFLD_CMD_DCACPU 26 #define M_FW_EQ_OFLD_CMD_DCACPU 0x1f #define V_FW_EQ_OFLD_CMD_DCACPU(x) ((x) << S_FW_EQ_OFLD_CMD_DCACPU) #define G_FW_EQ_OFLD_CMD_DCACPU(x) \ (((x) >> S_FW_EQ_OFLD_CMD_DCACPU) & M_FW_EQ_OFLD_CMD_DCACPU) #define S_FW_EQ_OFLD_CMD_FBMIN 23 #define M_FW_EQ_OFLD_CMD_FBMIN 0x7 #define V_FW_EQ_OFLD_CMD_FBMIN(x) ((x) << S_FW_EQ_OFLD_CMD_FBMIN) #define G_FW_EQ_OFLD_CMD_FBMIN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FBMIN) & M_FW_EQ_OFLD_CMD_FBMIN) #define S_FW_EQ_OFLD_CMD_FBMAX 20 #define M_FW_EQ_OFLD_CMD_FBMAX 0x7 #define V_FW_EQ_OFLD_CMD_FBMAX(x) ((x) << S_FW_EQ_OFLD_CMD_FBMAX) #define G_FW_EQ_OFLD_CMD_FBMAX(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FBMAX) & M_FW_EQ_OFLD_CMD_FBMAX) #define S_FW_EQ_OFLD_CMD_CIDXFTHRESHO 19 #define M_FW_EQ_OFLD_CMD_CIDXFTHRESHO 0x1 #define V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) \ ((x) << S_FW_EQ_OFLD_CMD_CIDXFTHRESHO) #define G_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) \ (((x) >> S_FW_EQ_OFLD_CMD_CIDXFTHRESHO) & M_FW_EQ_OFLD_CMD_CIDXFTHRESHO) #define F_FW_EQ_OFLD_CMD_CIDXFTHRESHO V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(1U) #define S_FW_EQ_OFLD_CMD_CIDXFTHRESH 16 #define M_FW_EQ_OFLD_CMD_CIDXFTHRESH 0x7 #define V_FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_OFLD_CMD_CIDXFTHRESH) #define G_FW_EQ_OFLD_CMD_CIDXFTHRESH(x) \ (((x) >> S_FW_EQ_OFLD_CMD_CIDXFTHRESH) & M_FW_EQ_OFLD_CMD_CIDXFTHRESH) #define S_FW_EQ_OFLD_CMD_EQSIZE 0 #define M_FW_EQ_OFLD_CMD_EQSIZE 0xffff #define V_FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << S_FW_EQ_OFLD_CMD_EQSIZE) #define G_FW_EQ_OFLD_CMD_EQSIZE(x) \ (((x) >> S_FW_EQ_OFLD_CMD_EQSIZE) & M_FW_EQ_OFLD_CMD_EQSIZE) /* Macros for VIID parsing: VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number */ #define S_FW_VIID_PFN 8 #define M_FW_VIID_PFN 0x7 #define V_FW_VIID_PFN(x) ((x) << S_FW_VIID_PFN) #define G_FW_VIID_PFN(x) (((x) >> S_FW_VIID_PFN) & M_FW_VIID_PFN) #define S_FW_VIID_VIVLD 7 #define M_FW_VIID_VIVLD 0x1 #define V_FW_VIID_VIVLD(x) ((x) << S_FW_VIID_VIVLD) #define G_FW_VIID_VIVLD(x) (((x) >> S_FW_VIID_VIVLD) & M_FW_VIID_VIVLD) #define S_FW_VIID_VIN 0 #define M_FW_VIID_VIN 0x7F 
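Illustrative sketch (not part of the firmware interface): decoding a 7-bit-VIN VIID with the macros above. For example, viid 0x2b5 decodes as pfn 2, vivld 1, vin 0x35. The open-coded shift/mask for VIN matches G_FW_VIID_VIN() defined just below; adapters exposing 256 VIs per PF use the wider FW_256VIID layout that follows.
static inline void
example_parse_viid(uint16_t viid, u_int *pfn, u_int *vivld, u_int *vin)	/* hypothetical helper */
{
	*pfn = G_FW_VIID_PFN(viid);		/* bits [10:8] */
	*vivld = G_FW_VIID_VIVLD(viid);		/* bit [7] */
	*vin = (viid >> S_FW_VIID_VIN) & M_FW_VIID_VIN;	/* bits [6:0] */
}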
#define V_FW_VIID_VIN(x) ((x) << S_FW_VIID_VIN) #define G_FW_VIID_VIN(x) (((x) >> S_FW_VIID_VIN) & M_FW_VIID_VIN) /* Macros for VIID parsing: VIID - [11:9] PFN, [8] VI Valid, [7:0] VI number */ #define S_FW_256VIID_PFN 9 #define M_FW_256VIID_PFN 0x7 #define V_FW_256VIID_PFN(x) ((x) << S_FW_256VIID_PFN) #define G_FW_256VIID_PFN(x) (((x) >> S_FW_256VIID_PFN) & M_FW_256VIID_PFN) #define S_FW_256VIID_VIVLD 8 #define M_FW_256VIID_VIVLD 0x1 #define V_FW_256VIID_VIVLD(x) ((x) << S_FW_256VIID_VIVLD) #define G_FW_256VIID_VIVLD(x) (((x) >> S_FW_256VIID_VIVLD) & M_FW_256VIID_VIVLD) #define S_FW_256VIID_VIN 0 #define M_FW_256VIID_VIN 0xFF #define V_FW_256VIID_VIN(x) ((x) << S_FW_256VIID_VIN) #define G_FW_256VIID_VIN(x) (((x) >> S_FW_256VIID_VIN) & M_FW_256VIID_VIN) enum fw_vi_func { FW_VI_FUNC_ETH, FW_VI_FUNC_OFLD, FW_VI_FUNC_IWARP, FW_VI_FUNC_OPENISCSI, FW_VI_FUNC_OPENFCOE, FW_VI_FUNC_FOISCSI, FW_VI_FUNC_FOFCOE, FW_VI_FUNC_FW, }; struct fw_vi_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be16 type_to_viid; __u8 mac[6]; __u8 portid_pkd; __u8 nmac; __u8 nmac0[6]; __be16 norss_rsssize; __u8 nmac1[6]; __be16 idsiiq_pkd; __u8 nmac2[6]; __be16 idseiq_pkd; __u8 nmac3[6]; __be64 r9; __be64 r10; }; #define S_FW_VI_CMD_PFN 8 #define M_FW_VI_CMD_PFN 0x7 #define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN) #define G_FW_VI_CMD_PFN(x) \ (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN) #define S_FW_VI_CMD_VFN 0 #define M_FW_VI_CMD_VFN 0xff #define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN) #define G_FW_VI_CMD_VFN(x) \ (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN) #define S_FW_VI_CMD_ALLOC 31 #define M_FW_VI_CMD_ALLOC 0x1 #define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC) #define G_FW_VI_CMD_ALLOC(x) \ (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC) #define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U) #define S_FW_VI_CMD_FREE 30 #define M_FW_VI_CMD_FREE 0x1 #define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE) #define G_FW_VI_CMD_FREE(x) \ (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE) #define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U) +#define S_FW_VI_CMD_VFVLD 24 +#define M_FW_VI_CMD_VFVLD 0x1 +#define V_FW_VI_CMD_VFVLD(x) ((x) << S_FW_VI_CMD_VFVLD) +#define G_FW_VI_CMD_VFVLD(x) \ + (((x) >> S_FW_VI_CMD_VFVLD) & M_FW_VI_CMD_VFVLD) +#define F_FW_VI_CMD_VFVLD V_FW_VI_CMD_VFVLD(1U) + +#define S_FW_VI_CMD_VIN 16 +#define M_FW_VI_CMD_VIN 0xff +#define V_FW_VI_CMD_VIN(x) ((x) << S_FW_VI_CMD_VIN) +#define G_FW_VI_CMD_VIN(x) \ + (((x) >> S_FW_VI_CMD_VIN) & M_FW_VI_CMD_VIN) + #define S_FW_VI_CMD_TYPE 15 #define M_FW_VI_CMD_TYPE 0x1 #define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE) #define G_FW_VI_CMD_TYPE(x) \ (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE) #define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U) #define S_FW_VI_CMD_FUNC 12 #define M_FW_VI_CMD_FUNC 0x7 #define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC) #define G_FW_VI_CMD_FUNC(x) \ (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC) #define S_FW_VI_CMD_VIID 0 #define M_FW_VI_CMD_VIID 0xfff #define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID) #define G_FW_VI_CMD_VIID(x) \ (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID) #define S_FW_VI_CMD_PORTID 4 #define M_FW_VI_CMD_PORTID 0xf #define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID) #define G_FW_VI_CMD_PORTID(x) \ (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID) #define S_FW_VI_CMD_NORSS 11 #define M_FW_VI_CMD_NORSS 0x1 #define V_FW_VI_CMD_NORSS(x) ((x) << S_FW_VI_CMD_NORSS) #define G_FW_VI_CMD_NORSS(x) \ (((x) >> S_FW_VI_CMD_NORSS) & M_FW_VI_CMD_NORSS) #define F_FW_VI_CMD_NORSS V_FW_VI_CMD_NORSS(1U) #define 
S_FW_VI_CMD_RSSSIZE 0 #define M_FW_VI_CMD_RSSSIZE 0x7ff #define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE) #define G_FW_VI_CMD_RSSSIZE(x) \ (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE) #define S_FW_VI_CMD_IDSIIQ 0 #define M_FW_VI_CMD_IDSIIQ 0x3ff #define V_FW_VI_CMD_IDSIIQ(x) ((x) << S_FW_VI_CMD_IDSIIQ) #define G_FW_VI_CMD_IDSIIQ(x) \ (((x) >> S_FW_VI_CMD_IDSIIQ) & M_FW_VI_CMD_IDSIIQ) #define S_FW_VI_CMD_IDSEIQ 0 #define M_FW_VI_CMD_IDSEIQ 0x3ff #define V_FW_VI_CMD_IDSEIQ(x) ((x) << S_FW_VI_CMD_IDSEIQ) #define G_FW_VI_CMD_IDSEIQ(x) \ (((x) >> S_FW_VI_CMD_IDSEIQ) & M_FW_VI_CMD_IDSEIQ) /* Special VI_MAC command index ids */ #define FW_VI_MAC_ADD_MAC 0x3FF #define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE #define FW_VI_MAC_MAC_BASED_FREE 0x3FD #define FW_VI_MAC_ID_BASED_FREE 0x3FC enum fw_vi_mac_smac { FW_VI_MAC_MPS_TCAM_ENTRY, FW_VI_MAC_MPS_TCAM_ONLY, FW_VI_MAC_SMT_ONLY, FW_VI_MAC_SMT_AND_MPSTCAM }; enum fw_vi_mac_result { FW_VI_MAC_R_SUCCESS, FW_VI_MAC_R_F_NONEXISTENT_NOMEM, FW_VI_MAC_R_SMAC_FAIL, FW_VI_MAC_R_F_ACL_CHECK }; enum fw_vi_mac_entry_types { FW_VI_MAC_TYPE_EXACTMAC, FW_VI_MAC_TYPE_HASHVEC, FW_VI_MAC_TYPE_RAW, FW_VI_MAC_TYPE_EXACTMAC_VNI, }; struct fw_vi_mac_cmd { __be32 op_to_viid; __be32 freemacs_to_len16; union fw_vi_mac { struct fw_vi_mac_exact { __be16 valid_to_idx; __u8 macaddr[6]; } exact[7]; struct fw_vi_mac_hash { __be64 hashvec; } hash; struct fw_vi_mac_raw { __be32 raw_idx_pkd; __be32 data0_pkd; __be32 data1[2]; __be64 data0m_pkd; __be32 data1m[2]; } raw; struct fw_vi_mac_vni { __be16 valid_to_idx; __u8 macaddr[6]; __be16 r7; __u8 macaddr_mask[6]; __be32 lookup_type_to_vni; __be32 vni_mask_pkd; } exact_vni[2]; } u; }; + +#define S_FW_VI_MAC_CMD_SMTID 12 +#define M_FW_VI_MAC_CMD_SMTID 0xff +#define V_FW_VI_MAC_CMD_SMTID(x) ((x) << S_FW_VI_MAC_CMD_SMTID) +#define G_FW_VI_MAC_CMD_SMTID(x) \ + (((x) >> S_FW_VI_MAC_CMD_SMTID) & M_FW_VI_MAC_CMD_SMTID) #define S_FW_VI_MAC_CMD_VIID 0 #define M_FW_VI_MAC_CMD_VIID 0xfff #define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID) #define G_FW_VI_MAC_CMD_VIID(x) \ (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID) #define S_FW_VI_MAC_CMD_FREEMACS 31 #define M_FW_VI_MAC_CMD_FREEMACS 0x1 #define V_FW_VI_MAC_CMD_FREEMACS(x) ((x) << S_FW_VI_MAC_CMD_FREEMACS) #define G_FW_VI_MAC_CMD_FREEMACS(x) \ (((x) >> S_FW_VI_MAC_CMD_FREEMACS) & M_FW_VI_MAC_CMD_FREEMACS) #define F_FW_VI_MAC_CMD_FREEMACS V_FW_VI_MAC_CMD_FREEMACS(1U) #define S_FW_VI_MAC_CMD_IS_SMAC 30 #define M_FW_VI_MAC_CMD_IS_SMAC 0x1 #define V_FW_VI_MAC_CMD_IS_SMAC(x) ((x) << S_FW_VI_MAC_CMD_IS_SMAC) #define G_FW_VI_MAC_CMD_IS_SMAC(x) \ (((x) >> S_FW_VI_MAC_CMD_IS_SMAC) & M_FW_VI_MAC_CMD_IS_SMAC) #define F_FW_VI_MAC_CMD_IS_SMAC V_FW_VI_MAC_CMD_IS_SMAC(1U) #define S_FW_VI_MAC_CMD_ENTRY_TYPE 23 #define M_FW_VI_MAC_CMD_ENTRY_TYPE 0x7 #define V_FW_VI_MAC_CMD_ENTRY_TYPE(x) ((x) << S_FW_VI_MAC_CMD_ENTRY_TYPE) #define G_FW_VI_MAC_CMD_ENTRY_TYPE(x) \ (((x) >> S_FW_VI_MAC_CMD_ENTRY_TYPE) & M_FW_VI_MAC_CMD_ENTRY_TYPE) #define S_FW_VI_MAC_CMD_HASHUNIEN 22 #define M_FW_VI_MAC_CMD_HASHUNIEN 0x1 #define V_FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << S_FW_VI_MAC_CMD_HASHUNIEN) #define G_FW_VI_MAC_CMD_HASHUNIEN(x) \ (((x) >> S_FW_VI_MAC_CMD_HASHUNIEN) & M_FW_VI_MAC_CMD_HASHUNIEN) #define F_FW_VI_MAC_CMD_HASHUNIEN V_FW_VI_MAC_CMD_HASHUNIEN(1U) #define S_FW_VI_MAC_CMD_VALID 15 #define M_FW_VI_MAC_CMD_VALID 0x1 #define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID) #define G_FW_VI_MAC_CMD_VALID(x) \ (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID) #define F_FW_VI_MAC_CMD_VALID 
V_FW_VI_MAC_CMD_VALID(1U) #define S_FW_VI_MAC_CMD_PRIO 12 #define M_FW_VI_MAC_CMD_PRIO 0x7 #define V_FW_VI_MAC_CMD_PRIO(x) ((x) << S_FW_VI_MAC_CMD_PRIO) #define G_FW_VI_MAC_CMD_PRIO(x) \ (((x) >> S_FW_VI_MAC_CMD_PRIO) & M_FW_VI_MAC_CMD_PRIO) #define S_FW_VI_MAC_CMD_SMAC_RESULT 10 #define M_FW_VI_MAC_CMD_SMAC_RESULT 0x3 #define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT) #define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \ (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT) #define S_FW_VI_MAC_CMD_IDX 0 #define M_FW_VI_MAC_CMD_IDX 0x3ff #define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX) #define G_FW_VI_MAC_CMD_IDX(x) \ (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX) #define S_FW_VI_MAC_CMD_RAW_IDX 16 #define M_FW_VI_MAC_CMD_RAW_IDX 0xffff #define V_FW_VI_MAC_CMD_RAW_IDX(x) ((x) << S_FW_VI_MAC_CMD_RAW_IDX) #define G_FW_VI_MAC_CMD_RAW_IDX(x) \ (((x) >> S_FW_VI_MAC_CMD_RAW_IDX) & M_FW_VI_MAC_CMD_RAW_IDX) #define S_FW_VI_MAC_CMD_DATA0 0 #define M_FW_VI_MAC_CMD_DATA0 0xffff #define V_FW_VI_MAC_CMD_DATA0(x) ((x) << S_FW_VI_MAC_CMD_DATA0) #define G_FW_VI_MAC_CMD_DATA0(x) \ (((x) >> S_FW_VI_MAC_CMD_DATA0) & M_FW_VI_MAC_CMD_DATA0) #define S_FW_VI_MAC_CMD_LOOKUP_TYPE 31 #define M_FW_VI_MAC_CMD_LOOKUP_TYPE 0x1 #define V_FW_VI_MAC_CMD_LOOKUP_TYPE(x) ((x) << S_FW_VI_MAC_CMD_LOOKUP_TYPE) #define G_FW_VI_MAC_CMD_LOOKUP_TYPE(x) \ (((x) >> S_FW_VI_MAC_CMD_LOOKUP_TYPE) & M_FW_VI_MAC_CMD_LOOKUP_TYPE) #define F_FW_VI_MAC_CMD_LOOKUP_TYPE V_FW_VI_MAC_CMD_LOOKUP_TYPE(1U) #define S_FW_VI_MAC_CMD_DIP_HIT 30 #define M_FW_VI_MAC_CMD_DIP_HIT 0x1 #define V_FW_VI_MAC_CMD_DIP_HIT(x) ((x) << S_FW_VI_MAC_CMD_DIP_HIT) #define G_FW_VI_MAC_CMD_DIP_HIT(x) \ (((x) >> S_FW_VI_MAC_CMD_DIP_HIT) & M_FW_VI_MAC_CMD_DIP_HIT) #define F_FW_VI_MAC_CMD_DIP_HIT V_FW_VI_MAC_CMD_DIP_HIT(1U) #define S_FW_VI_MAC_CMD_VNI 0 #define M_FW_VI_MAC_CMD_VNI 0xffffff #define V_FW_VI_MAC_CMD_VNI(x) ((x) << S_FW_VI_MAC_CMD_VNI) #define G_FW_VI_MAC_CMD_VNI(x) \ (((x) >> S_FW_VI_MAC_CMD_VNI) & M_FW_VI_MAC_CMD_VNI) /* Loopback port number passed from the driver as part of * fw_vi_mac_vni. For non-loopback entries the firmware ignores this * field and takes the port number from the flowc; a physical port * number is ignored as well. Expected range: 4-7. 
*/ #define S_FW_VI_MAC_CMD_PORT 24 #define M_FW_VI_MAC_CMD_PORT 0x7 #define V_FW_VI_MAC_CMD_PORT(x) ((x) << S_FW_VI_MAC_CMD_PORT) #define G_FW_VI_MAC_CMD_PORT(x) \ (((x) >> S_FW_VI_MAC_CMD_PORT) & M_FW_VI_MAC_CMD_PORT) #define S_FW_VI_MAC_CMD_VNI_MASK 0 #define M_FW_VI_MAC_CMD_VNI_MASK 0xffffff #define V_FW_VI_MAC_CMD_VNI_MASK(x) ((x) << S_FW_VI_MAC_CMD_VNI_MASK) #define G_FW_VI_MAC_CMD_VNI_MASK(x) \ (((x) >> S_FW_VI_MAC_CMD_VNI_MASK) & M_FW_VI_MAC_CMD_VNI_MASK) /* T4 max MTU supported */ #define T4_MAX_MTU_SUPPORTED 9600 #define FW_RXMODE_MTU_NO_CHG 65535 struct fw_vi_rxmode_cmd { __be32 op_to_viid; __be32 retval_len16; __be32 mtu_to_vlanexen; __be32 r4_lo; }; #define S_FW_VI_RXMODE_CMD_VIID 0 #define M_FW_VI_RXMODE_CMD_VIID 0xfff #define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID) #define G_FW_VI_RXMODE_CMD_VIID(x) \ (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID) #define S_FW_VI_RXMODE_CMD_MTU 16 #define M_FW_VI_RXMODE_CMD_MTU 0xffff #define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU) #define G_FW_VI_RXMODE_CMD_MTU(x) \ (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU) #define S_FW_VI_RXMODE_CMD_PROMISCEN 14 #define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3 #define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN) #define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \ (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN) #define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12 #define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3 #define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN) #define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN) #define S_FW_VI_RXMODE_CMD_BROADCASTEN 10 #define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3 #define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN) #define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & M_FW_VI_RXMODE_CMD_BROADCASTEN) #define S_FW_VI_RXMODE_CMD_VLANEXEN 8 #define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3 #define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN) #define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \ (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN) struct fw_vi_enable_cmd { __be32 op_to_viid; __be32 ien_to_len16; __be16 blinkdur; __be16 r3; __be32 r4; }; #define S_FW_VI_ENABLE_CMD_VIID 0 #define M_FW_VI_ENABLE_CMD_VIID 0xfff #define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID) #define G_FW_VI_ENABLE_CMD_VIID(x) \ (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID) #define S_FW_VI_ENABLE_CMD_IEN 31 #define M_FW_VI_ENABLE_CMD_IEN 0x1 #define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN) #define G_FW_VI_ENABLE_CMD_IEN(x) \ (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN) #define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U) #define S_FW_VI_ENABLE_CMD_EEN 30 #define M_FW_VI_ENABLE_CMD_EEN 0x1 #define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN) #define G_FW_VI_ENABLE_CMD_EEN(x) \ (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN) #define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U) #define S_FW_VI_ENABLE_CMD_LED 29 #define M_FW_VI_ENABLE_CMD_LED 0x1 #define V_FW_VI_ENABLE_CMD_LED(x) ((x) << S_FW_VI_ENABLE_CMD_LED) #define G_FW_VI_ENABLE_CMD_LED(x) \ (((x) >> S_FW_VI_ENABLE_CMD_LED) & M_FW_VI_ENABLE_CMD_LED) #define F_FW_VI_ENABLE_CMD_LED V_FW_VI_ENABLE_CMD_LED(1U) #define S_FW_VI_ENABLE_CMD_DCB_INFO 28 #define M_FW_VI_ENABLE_CMD_DCB_INFO 0x1 #define 
V_FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << S_FW_VI_ENABLE_CMD_DCB_INFO) #define G_FW_VI_ENABLE_CMD_DCB_INFO(x) \ (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO) #define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U) /* VI VF stats offset definitions */ #define VI_VF_NUM_STATS 16 enum fw_vi_stats_vf_index { FW_VI_VF_STAT_TX_BCAST_BYTES_IX, FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, FW_VI_VF_STAT_TX_MCAST_BYTES_IX, FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, FW_VI_VF_STAT_TX_UCAST_BYTES_IX, FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, FW_VI_VF_STAT_TX_DROP_FRAMES_IX, FW_VI_VF_STAT_TX_OFLD_BYTES_IX, FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, FW_VI_VF_STAT_RX_BCAST_BYTES_IX, FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, FW_VI_VF_STAT_RX_MCAST_BYTES_IX, FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, FW_VI_VF_STAT_RX_UCAST_BYTES_IX, FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, FW_VI_VF_STAT_RX_ERR_FRAMES_IX }; /* VI PF stats offset definitions */ #define VI_PF_NUM_STATS 17 enum fw_vi_stats_pf_index { FW_VI_PF_STAT_TX_BCAST_BYTES_IX, FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, FW_VI_PF_STAT_TX_MCAST_BYTES_IX, FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, FW_VI_PF_STAT_TX_UCAST_BYTES_IX, FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, FW_VI_PF_STAT_TX_OFLD_BYTES_IX, FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, FW_VI_PF_STAT_RX_BYTES_IX, FW_VI_PF_STAT_RX_FRAMES_IX, FW_VI_PF_STAT_RX_BCAST_BYTES_IX, FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, FW_VI_PF_STAT_RX_MCAST_BYTES_IX, FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, FW_VI_PF_STAT_RX_UCAST_BYTES_IX, FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, FW_VI_PF_STAT_RX_ERR_FRAMES_IX }; struct fw_vi_stats_cmd { __be32 op_to_viid; __be32 retval_len16; union fw_vi_stats { struct fw_vi_stats_ctl { __be16 nstats_ix; __be16 r6; __be32 r7; __be64 stat0; __be64 stat1; __be64 stat2; __be64 stat3; __be64 stat4; __be64 stat5; } ctl; struct fw_vi_stats_pf { __be64 tx_bcast_bytes; __be64 tx_bcast_frames; __be64 tx_mcast_bytes; __be64 tx_mcast_frames; __be64 tx_ucast_bytes; __be64 tx_ucast_frames; __be64 tx_offload_bytes; __be64 tx_offload_frames; __be64 rx_pf_bytes; __be64 rx_pf_frames; __be64 rx_bcast_bytes; __be64 rx_bcast_frames; __be64 rx_mcast_bytes; __be64 rx_mcast_frames; __be64 rx_ucast_bytes; __be64 rx_ucast_frames; __be64 rx_err_frames; } pf; struct fw_vi_stats_vf { __be64 tx_bcast_bytes; __be64 tx_bcast_frames; __be64 tx_mcast_bytes; __be64 tx_mcast_frames; __be64 tx_ucast_bytes; __be64 tx_ucast_frames; __be64 tx_drop_frames; __be64 tx_offload_bytes; __be64 tx_offload_frames; __be64 rx_bcast_bytes; __be64 rx_bcast_frames; __be64 rx_mcast_bytes; __be64 rx_mcast_frames; __be64 rx_ucast_bytes; __be64 rx_ucast_frames; __be64 rx_err_frames; } vf; } u; }; #define S_FW_VI_STATS_CMD_VIID 0 #define M_FW_VI_STATS_CMD_VIID 0xfff #define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID) #define G_FW_VI_STATS_CMD_VIID(x) \ (((x) >> S_FW_VI_STATS_CMD_VIID) & M_FW_VI_STATS_CMD_VIID) #define S_FW_VI_STATS_CMD_NSTATS 12 #define M_FW_VI_STATS_CMD_NSTATS 0x7 #define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS) #define G_FW_VI_STATS_CMD_NSTATS(x) \ (((x) >> S_FW_VI_STATS_CMD_NSTATS) & M_FW_VI_STATS_CMD_NSTATS) #define S_FW_VI_STATS_CMD_IX 0 #define M_FW_VI_STATS_CMD_IX 0x1f #define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX) #define G_FW_VI_STATS_CMD_IX(x) \ (((x) >> S_FW_VI_STATS_CMD_IX) & M_FW_VI_STATS_CMD_IX) struct fw_acl_mac_cmd { __be32 op_to_vfn; __be32 en_to_len16; __u8 nmac; __u8 r3[7]; __be16 r4; __u8 macaddr0[6]; __be16 r5; __u8 macaddr1[6]; __be16 r6; __u8 macaddr2[6]; __be16 r7; __u8 macaddr3[6]; }; #define S_FW_ACL_MAC_CMD_PFN 8 #define 
M_FW_ACL_MAC_CMD_PFN 0x7 #define V_FW_ACL_MAC_CMD_PFN(x) ((x) << S_FW_ACL_MAC_CMD_PFN) #define G_FW_ACL_MAC_CMD_PFN(x) \ (((x) >> S_FW_ACL_MAC_CMD_PFN) & M_FW_ACL_MAC_CMD_PFN) #define S_FW_ACL_MAC_CMD_VFN 0 #define M_FW_ACL_MAC_CMD_VFN 0xff #define V_FW_ACL_MAC_CMD_VFN(x) ((x) << S_FW_ACL_MAC_CMD_VFN) #define G_FW_ACL_MAC_CMD_VFN(x) \ (((x) >> S_FW_ACL_MAC_CMD_VFN) & M_FW_ACL_MAC_CMD_VFN) #define S_FW_ACL_MAC_CMD_EN 31 #define M_FW_ACL_MAC_CMD_EN 0x1 #define V_FW_ACL_MAC_CMD_EN(x) ((x) << S_FW_ACL_MAC_CMD_EN) #define G_FW_ACL_MAC_CMD_EN(x) \ (((x) >> S_FW_ACL_MAC_CMD_EN) & M_FW_ACL_MAC_CMD_EN) #define F_FW_ACL_MAC_CMD_EN V_FW_ACL_MAC_CMD_EN(1U) struct fw_acl_vlan_cmd { __be32 op_to_vfn; __be32 en_to_len16; __u8 nvlan; __u8 dropnovlan_fm; __u8 r3_lo[6]; __be16 vlanid[16]; }; #define S_FW_ACL_VLAN_CMD_PFN 8 #define M_FW_ACL_VLAN_CMD_PFN 0x7 #define V_FW_ACL_VLAN_CMD_PFN(x) ((x) << S_FW_ACL_VLAN_CMD_PFN) #define G_FW_ACL_VLAN_CMD_PFN(x) \ (((x) >> S_FW_ACL_VLAN_CMD_PFN) & M_FW_ACL_VLAN_CMD_PFN) #define S_FW_ACL_VLAN_CMD_VFN 0 #define M_FW_ACL_VLAN_CMD_VFN 0xff #define V_FW_ACL_VLAN_CMD_VFN(x) ((x) << S_FW_ACL_VLAN_CMD_VFN) #define G_FW_ACL_VLAN_CMD_VFN(x) \ (((x) >> S_FW_ACL_VLAN_CMD_VFN) & M_FW_ACL_VLAN_CMD_VFN) #define S_FW_ACL_VLAN_CMD_EN 31 #define M_FW_ACL_VLAN_CMD_EN 0x1 #define V_FW_ACL_VLAN_CMD_EN(x) ((x) << S_FW_ACL_VLAN_CMD_EN) #define G_FW_ACL_VLAN_CMD_EN(x) \ (((x) >> S_FW_ACL_VLAN_CMD_EN) & M_FW_ACL_VLAN_CMD_EN) #define F_FW_ACL_VLAN_CMD_EN V_FW_ACL_VLAN_CMD_EN(1U) #define S_FW_ACL_VLAN_CMD_TRANSPARENT 30 #define M_FW_ACL_VLAN_CMD_TRANSPARENT 0x1 #define V_FW_ACL_VLAN_CMD_TRANSPARENT(x) \ ((x) << S_FW_ACL_VLAN_CMD_TRANSPARENT) #define G_FW_ACL_VLAN_CMD_TRANSPARENT(x) \ (((x) >> S_FW_ACL_VLAN_CMD_TRANSPARENT) & M_FW_ACL_VLAN_CMD_TRANSPARENT) #define F_FW_ACL_VLAN_CMD_TRANSPARENT V_FW_ACL_VLAN_CMD_TRANSPARENT(1U) #define S_FW_ACL_VLAN_CMD_PMASK 16 #define M_FW_ACL_VLAN_CMD_PMASK 0xf #define V_FW_ACL_VLAN_CMD_PMASK(x) ((x) << S_FW_ACL_VLAN_CMD_PMASK) #define G_FW_ACL_VLAN_CMD_PMASK(x) \ (((x) >> S_FW_ACL_VLAN_CMD_PMASK) & M_FW_ACL_VLAN_CMD_PMASK) #define S_FW_ACL_VLAN_CMD_DROPNOVLAN 7 #define M_FW_ACL_VLAN_CMD_DROPNOVLAN 0x1 #define V_FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << S_FW_ACL_VLAN_CMD_DROPNOVLAN) #define G_FW_ACL_VLAN_CMD_DROPNOVLAN(x) \ (((x) >> S_FW_ACL_VLAN_CMD_DROPNOVLAN) & M_FW_ACL_VLAN_CMD_DROPNOVLAN) #define F_FW_ACL_VLAN_CMD_DROPNOVLAN V_FW_ACL_VLAN_CMD_DROPNOVLAN(1U) #define S_FW_ACL_VLAN_CMD_FM 6 #define M_FW_ACL_VLAN_CMD_FM 0x1 #define V_FW_ACL_VLAN_CMD_FM(x) ((x) << S_FW_ACL_VLAN_CMD_FM) #define G_FW_ACL_VLAN_CMD_FM(x) \ (((x) >> S_FW_ACL_VLAN_CMD_FM) & M_FW_ACL_VLAN_CMD_FM) #define F_FW_ACL_VLAN_CMD_FM V_FW_ACL_VLAN_CMD_FM(1U) /* old 16-bit port capabilities bitmap (fw_port_cap16_t) */ enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, FW_PORT_CAP_SPEED_1G = 0x0002, FW_PORT_CAP_SPEED_25G = 0x0004, FW_PORT_CAP_SPEED_10G = 0x0008, FW_PORT_CAP_SPEED_40G = 0x0010, FW_PORT_CAP_SPEED_100G = 0x0020, FW_PORT_CAP_FC_RX = 0x0040, FW_PORT_CAP_FC_TX = 0x0080, FW_PORT_CAP_ANEG = 0x0100, FW_PORT_CAP_MDIAUTO = 0x0200, FW_PORT_CAP_MDISTRAIGHT = 0x0400, FW_PORT_CAP_FEC_RS = 0x0800, FW_PORT_CAP_FEC_BASER_RS = 0x1000, FW_PORT_CAP_FORCE_PAUSE = 0x2000, FW_PORT_CAP_802_3_PAUSE = 0x4000, FW_PORT_CAP_802_3_ASM_DIR = 0x8000, }; #define S_FW_PORT_CAP_SPEED 0 #define M_FW_PORT_CAP_SPEED 0x3f #define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED) #define G_FW_PORT_CAP_SPEED(x) \ (((x) >> S_FW_PORT_CAP_SPEED) & M_FW_PORT_CAP_SPEED) #define S_FW_PORT_CAP_FC 6 #define M_FW_PORT_CAP_FC 0x3 
#define V_FW_PORT_CAP_FC(x) ((x) << S_FW_PORT_CAP_FC) #define G_FW_PORT_CAP_FC(x) \ (((x) >> S_FW_PORT_CAP_FC) & M_FW_PORT_CAP_FC) #define S_FW_PORT_CAP_ANEG 8 #define M_FW_PORT_CAP_ANEG 0x1 #define V_FW_PORT_CAP_ANEG(x) ((x) << S_FW_PORT_CAP_ANEG) #define G_FW_PORT_CAP_ANEG(x) \ (((x) >> S_FW_PORT_CAP_ANEG) & M_FW_PORT_CAP_ANEG) #define S_FW_PORT_CAP_FEC 11 #define M_FW_PORT_CAP_FEC 0x3 #define V_FW_PORT_CAP_FEC(x) ((x) << S_FW_PORT_CAP_FEC) #define G_FW_PORT_CAP_FEC(x) \ (((x) >> S_FW_PORT_CAP_FEC) & M_FW_PORT_CAP_FEC) #define S_FW_PORT_CAP_FORCE_PAUSE 13 #define M_FW_PORT_CAP_FORCE_PAUSE 0x1 #define V_FW_PORT_CAP_FORCE_PAUSE(x) ((x) << S_FW_PORT_CAP_FORCE_PAUSE) #define G_FW_PORT_CAP_FORCE_PAUSE(x) \ (((x) >> S_FW_PORT_CAP_FORCE_PAUSE) & M_FW_PORT_CAP_FORCE_PAUSE) #define S_FW_PORT_CAP_802_3 14 #define M_FW_PORT_CAP_802_3 0x3 #define V_FW_PORT_CAP_802_3(x) ((x) << S_FW_PORT_CAP_802_3) #define G_FW_PORT_CAP_802_3(x) \ (((x) >> S_FW_PORT_CAP_802_3) & M_FW_PORT_CAP_802_3) enum fw_port_mdi { FW_PORT_CAP_MDI_UNCHANGED, FW_PORT_CAP_MDI_AUTO, FW_PORT_CAP_MDI_F_STRAIGHT, FW_PORT_CAP_MDI_F_CROSSOVER }; #define S_FW_PORT_CAP_MDI 9 #define M_FW_PORT_CAP_MDI 3 #define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI) #define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI) /* new 32-bit port capabilities bitmap (fw_port_cap32_t) */ #define FW_PORT_CAP32_SPEED_100M 0x00000001UL #define FW_PORT_CAP32_SPEED_1G 0x00000002UL #define FW_PORT_CAP32_SPEED_10G 0x00000004UL #define FW_PORT_CAP32_SPEED_25G 0x00000008UL #define FW_PORT_CAP32_SPEED_40G 0x00000010UL #define FW_PORT_CAP32_SPEED_50G 0x00000020UL #define FW_PORT_CAP32_SPEED_100G 0x00000040UL #define FW_PORT_CAP32_SPEED_200G 0x00000080UL #define FW_PORT_CAP32_SPEED_400G 0x00000100UL #define FW_PORT_CAP32_SPEED_RESERVED1 0x00000200UL #define FW_PORT_CAP32_SPEED_RESERVED2 0x00000400UL #define FW_PORT_CAP32_SPEED_RESERVED3 0x00000800UL #define FW_PORT_CAP32_RESERVED1 0x0000f000UL #define FW_PORT_CAP32_FC_RX 0x00010000UL #define FW_PORT_CAP32_FC_TX 0x00020000UL #define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL #define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL #define FW_PORT_CAP32_ANEG 0x00100000UL #define FW_PORT_CAP32_MDIAUTO 0x00200000UL #define FW_PORT_CAP32_MDISTRAIGHT 0x00400000UL #define FW_PORT_CAP32_FEC_RS 0x00800000UL #define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL #define FW_PORT_CAP32_FEC_RESERVED1 0x02000000UL #define FW_PORT_CAP32_FEC_RESERVED2 0x04000000UL #define FW_PORT_CAP32_FEC_RESERVED3 0x08000000UL #define FW_PORT_CAP32_FORCE_PAUSE 0x10000000UL #define FW_PORT_CAP32_RESERVED2 0xe0000000UL #define S_FW_PORT_CAP32_SPEED 0 #define M_FW_PORT_CAP32_SPEED 0xfff #define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED) #define G_FW_PORT_CAP32_SPEED(x) \ (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED) #define S_FW_PORT_CAP32_FC 16 #define M_FW_PORT_CAP32_FC 0x3 #define V_FW_PORT_CAP32_FC(x) ((x) << S_FW_PORT_CAP32_FC) #define G_FW_PORT_CAP32_FC(x) \ (((x) >> S_FW_PORT_CAP32_FC) & M_FW_PORT_CAP32_FC) #define S_FW_PORT_CAP32_802_3 18 #define M_FW_PORT_CAP32_802_3 0x3 #define V_FW_PORT_CAP32_802_3(x) ((x) << S_FW_PORT_CAP32_802_3) #define G_FW_PORT_CAP32_802_3(x) \ (((x) >> S_FW_PORT_CAP32_802_3) & M_FW_PORT_CAP32_802_3) #define S_FW_PORT_CAP32_ANEG 20 #define M_FW_PORT_CAP32_ANEG 0x1 #define V_FW_PORT_CAP32_ANEG(x) ((x) << S_FW_PORT_CAP32_ANEG) #define G_FW_PORT_CAP32_ANEG(x) \ (((x) >> S_FW_PORT_CAP32_ANEG) & M_FW_PORT_CAP32_ANEG) #define S_FW_PORT_CAP32_FORCE_PAUSE 28 #define M_FW_PORT_CAP32_FORCE_PAUSE 0x1 
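/*
 * Illustrative sketch, not part of the firmware interface: reducing a
 * 32-bit capability word to its highest advertised speed in Mb/s.  The
 * header's fw_port_cap32_t is assumed to be a plain uint32_t.
 */
static inline int
fwcap32_top_speed_mbps(uint32_t caps)
{
	if (caps & FW_PORT_CAP32_SPEED_400G)
		return (400000);
	if (caps & FW_PORT_CAP32_SPEED_200G)
		return (200000);
	if (caps & FW_PORT_CAP32_SPEED_100G)
		return (100000);
	if (caps & FW_PORT_CAP32_SPEED_50G)
		return (50000);
	if (caps & FW_PORT_CAP32_SPEED_40G)
		return (40000);
	if (caps & FW_PORT_CAP32_SPEED_25G)
		return (25000);
	if (caps & FW_PORT_CAP32_SPEED_10G)
		return (10000);
	if (caps & FW_PORT_CAP32_SPEED_1G)
		return (1000);
	if (caps & FW_PORT_CAP32_SPEED_100M)
		return (100);
	return (0);
}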
#define V_FW_PORT_CAP32_FORCE_PAUSE(x) ((x) << S_FW_PORT_CAP32_FORCE_PAUSE) #define G_FW_PORT_CAP32_FORCE_PAUSE(x) \ (((x) >> S_FW_PORT_CAP32_FORCE_PAUSE) & M_FW_PORT_CAP32_FORCE_PAUSE) enum fw_port_mdi32 { FW_PORT_CAP32_MDI_UNCHANGED, FW_PORT_CAP32_MDI_AUTO, FW_PORT_CAP32_MDI_F_STRAIGHT, FW_PORT_CAP32_MDI_F_CROSSOVER }; #define S_FW_PORT_CAP32_MDI 21 #define M_FW_PORT_CAP32_MDI 3 #define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI) #define G_FW_PORT_CAP32_MDI(x) \ (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI) #define S_FW_PORT_CAP32_FEC 23 #define M_FW_PORT_CAP32_FEC 0x1f #define V_FW_PORT_CAP32_FEC(x) ((x) << S_FW_PORT_CAP32_FEC) #define G_FW_PORT_CAP32_FEC(x) \ (((x) >> S_FW_PORT_CAP32_FEC) & M_FW_PORT_CAP32_FEC) /* macros to isolate various 32-bit Port Capabilities sub-fields */ #define CAP32_SPEED(__cap32) \ (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) & __cap32) #define CAP32_FEC(__cap32) \ (V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC) & __cap32) #define CAP32_FC(__cap32) \ (V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC) & __cap32) enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, FW_PORT_ACTION_L2_CFG = 0x0002, FW_PORT_ACTION_GET_PORT_INFO = 0x0003, FW_PORT_ACTION_L2_PPP_CFG = 0x0004, FW_PORT_ACTION_L2_DCB_CFG = 0x0005, FW_PORT_ACTION_DCB_READ_TRANS = 0x0006, FW_PORT_ACTION_DCB_READ_RECV = 0x0007, FW_PORT_ACTION_DCB_READ_DET = 0x0008, FW_PORT_ACTION_L1_CFG32 = 0x0009, FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a, FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, FW_PORT_ACTION_LPBK_SS_ASIC = 0x0022, FW_PORT_ACTION_LPBK_WS_ASIC = 0x0023, FW_PORT_ACTION_LPBK_WS_EXT_PHY = 0x0025, FW_PORT_ACTION_LPBK_SS_EXT = 0x0026, FW_PORT_ACTION_DIAGNOSTICS = 0x0027, FW_PORT_ACTION_LPBK_SS_EXT_PHY = 0x0028, FW_PORT_ACTION_PHY_RESET = 0x0040, FW_PORT_ACTION_PMA_RESET = 0x0041, FW_PORT_ACTION_PCS_RESET = 0x0042, FW_PORT_ACTION_PHYXS_RESET = 0x0043, FW_PORT_ACTION_DTEXS_REEST = 0x0044, FW_PORT_ACTION_AN_RESET = 0x0045, }; enum fw_port_l2cfg_ctlbf { FW_PORT_L2_CTLBF_OVLAN0 = 0x01, FW_PORT_L2_CTLBF_OVLAN1 = 0x02, FW_PORT_L2_CTLBF_OVLAN2 = 0x04, FW_PORT_L2_CTLBF_OVLAN3 = 0x08, FW_PORT_L2_CTLBF_IVLAN = 0x10, FW_PORT_L2_CTLBF_TXIPG = 0x20, FW_PORT_L2_CTLBF_MTU = 0x40 }; enum fw_dcb_app_tlv_sf { FW_DCB_APP_SF_ETHERTYPE, FW_DCB_APP_SF_SOCKET_TCP, FW_DCB_APP_SF_SOCKET_UDP, FW_DCB_APP_SF_SOCKET_ALL, }; enum fw_port_dcb_versions { FW_PORT_DCB_VER_UNKNOWN, FW_PORT_DCB_VER_CEE1D0, FW_PORT_DCB_VER_CEE1D01, FW_PORT_DCB_VER_IEEE, FW_PORT_DCB_VER_AUTO=7 }; enum fw_port_dcb_cfg { FW_PORT_DCB_CFG_PG = 0x01, FW_PORT_DCB_CFG_PFC = 0x02, FW_PORT_DCB_CFG_APPL = 0x04 }; enum fw_port_dcb_cfg_rc { FW_PORT_DCB_CFG_SUCCESS = 0x0, FW_PORT_DCB_CFG_ERROR = 0x1 }; enum fw_port_dcb_type { FW_PORT_DCB_TYPE_PGID = 0x00, FW_PORT_DCB_TYPE_PGRATE = 0x01, FW_PORT_DCB_TYPE_PRIORATE = 0x02, FW_PORT_DCB_TYPE_PFC = 0x03, FW_PORT_DCB_TYPE_APP_ID = 0x04, FW_PORT_DCB_TYPE_CONTROL = 0x05, }; enum fw_port_dcb_feature_state { FW_PORT_DCB_FEATURE_STATE_PENDING = 0x0, FW_PORT_DCB_FEATURE_STATE_SUCCESS = 0x1, FW_PORT_DCB_FEATURE_STATE_ERROR = 0x2, FW_PORT_DCB_FEATURE_STATE_TIMEOUT = 0x3, }; enum fw_port_diag_ops { FW_PORT_DIAGS_TEMP = 0x00, FW_PORT_DIAGS_TX_POWER = 0x01, FW_PORT_DIAGS_RX_POWER = 0x02, FW_PORT_DIAGS_TX_DIS = 0x03, }; struct fw_port_cmd { __be32 op_to_portid; __be32 action_to_len16; union fw_port { struct fw_port_l1cfg { __be32 rcap; __be32 r; } l1cfg; struct fw_port_l2cfg { __u8 ctlbf; __u8 ovlan3_to_ivlan0; __be16 ivlantype; 
__be16 txipg_force_pinfo; __be16 mtu; __be16 ovlan0mask; __be16 ovlan0type; __be16 ovlan1mask; __be16 ovlan1type; __be16 ovlan2mask; __be16 ovlan2type; __be16 ovlan3mask; __be16 ovlan3type; } l2cfg; struct fw_port_info { __be32 lstatus_to_modtype; __be16 pcap; __be16 acap; __be16 mtu; __u8 cbllen; __u8 auxlinfo; __u8 dcbxdis_pkd; __u8 r8_lo; __be16 lpacap; __be64 r9; } info; struct fw_port_diags { __u8 diagop; __u8 r[3]; __be32 diagval; } diags; union fw_port_dcb { struct fw_port_dcb_pgid { __u8 type; __u8 apply_pkd; __u8 r10_lo[2]; __be32 pgid; __be64 r11; } pgid; struct fw_port_dcb_pgrate { __u8 type; __u8 apply_pkd; __u8 r10_lo[5]; __u8 num_tcs_supported; __u8 pgrate[8]; __u8 tsa[8]; } pgrate; struct fw_port_dcb_priorate { __u8 type; __u8 apply_pkd; __u8 r10_lo[6]; __u8 strict_priorate[8]; } priorate; struct fw_port_dcb_pfc { __u8 type; __u8 pfcen; __u8 apply_pkd; __u8 r10_lo[4]; __u8 max_pfc_tcs; __be64 r11; } pfc; struct fw_port_app_priority { __u8 type; __u8 apply_pkd; __u8 r10_lo; __u8 idx; __u8 user_prio_map; __u8 sel_field; __be16 protocolid; __be64 r12; } app_priority; struct fw_port_dcb_control { __u8 type; __u8 all_syncd_pkd; __be16 dcb_version_to_app_state; __be32 r11; __be64 r12; } control; } dcb; struct fw_port_l1cfg32 { __be32 rcap32; __be32 r; } l1cfg32; struct fw_port_info32 { __be32 lstatus32_to_cbllen32; __be32 auxlinfo32_mtu32; __be32 linkattr32; __be32 pcaps32; __be32 acaps32; __be32 lpacaps32; } info32; } u; }; #define S_FW_PORT_CMD_READ 22 #define M_FW_PORT_CMD_READ 0x1 #define V_FW_PORT_CMD_READ(x) ((x) << S_FW_PORT_CMD_READ) #define G_FW_PORT_CMD_READ(x) \ (((x) >> S_FW_PORT_CMD_READ) & M_FW_PORT_CMD_READ) #define F_FW_PORT_CMD_READ V_FW_PORT_CMD_READ(1U) #define S_FW_PORT_CMD_PORTID 0 #define M_FW_PORT_CMD_PORTID 0xf #define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID) #define G_FW_PORT_CMD_PORTID(x) \ (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID) #define S_FW_PORT_CMD_ACTION 16 #define M_FW_PORT_CMD_ACTION 0xffff #define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION) #define G_FW_PORT_CMD_ACTION(x) \ (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION) #define S_FW_PORT_CMD_OVLAN3 7 #define M_FW_PORT_CMD_OVLAN3 0x1 #define V_FW_PORT_CMD_OVLAN3(x) ((x) << S_FW_PORT_CMD_OVLAN3) #define G_FW_PORT_CMD_OVLAN3(x) \ (((x) >> S_FW_PORT_CMD_OVLAN3) & M_FW_PORT_CMD_OVLAN3) #define F_FW_PORT_CMD_OVLAN3 V_FW_PORT_CMD_OVLAN3(1U) #define S_FW_PORT_CMD_OVLAN2 6 #define M_FW_PORT_CMD_OVLAN2 0x1 #define V_FW_PORT_CMD_OVLAN2(x) ((x) << S_FW_PORT_CMD_OVLAN2) #define G_FW_PORT_CMD_OVLAN2(x) \ (((x) >> S_FW_PORT_CMD_OVLAN2) & M_FW_PORT_CMD_OVLAN2) #define F_FW_PORT_CMD_OVLAN2 V_FW_PORT_CMD_OVLAN2(1U) #define S_FW_PORT_CMD_OVLAN1 5 #define M_FW_PORT_CMD_OVLAN1 0x1 #define V_FW_PORT_CMD_OVLAN1(x) ((x) << S_FW_PORT_CMD_OVLAN1) #define G_FW_PORT_CMD_OVLAN1(x) \ (((x) >> S_FW_PORT_CMD_OVLAN1) & M_FW_PORT_CMD_OVLAN1) #define F_FW_PORT_CMD_OVLAN1 V_FW_PORT_CMD_OVLAN1(1U) #define S_FW_PORT_CMD_OVLAN0 4 #define M_FW_PORT_CMD_OVLAN0 0x1 #define V_FW_PORT_CMD_OVLAN0(x) ((x) << S_FW_PORT_CMD_OVLAN0) #define G_FW_PORT_CMD_OVLAN0(x) \ (((x) >> S_FW_PORT_CMD_OVLAN0) & M_FW_PORT_CMD_OVLAN0) #define F_FW_PORT_CMD_OVLAN0 V_FW_PORT_CMD_OVLAN0(1U) #define S_FW_PORT_CMD_IVLAN0 3 #define M_FW_PORT_CMD_IVLAN0 0x1 #define V_FW_PORT_CMD_IVLAN0(x) ((x) << S_FW_PORT_CMD_IVLAN0) #define G_FW_PORT_CMD_IVLAN0(x) \ (((x) >> S_FW_PORT_CMD_IVLAN0) & M_FW_PORT_CMD_IVLAN0) #define F_FW_PORT_CMD_IVLAN0 V_FW_PORT_CMD_IVLAN0(1U) #define S_FW_PORT_CMD_TXIPG 3 #define M_FW_PORT_CMD_TXIPG 0x1fff 
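/*
 * Illustrative sketch, not part of the firmware interface: filling in a
 * GET_PORT_INFO32 request.  V_FW_CMD_OP, F_FW_CMD_REQUEST, F_FW_CMD_READ,
 * the FW_PORT_CMD opcode and FW_LEN16 are assumed from the common command
 * section earlier in this header; memset/htobe32 are assumed from the
 * surrounding kernel environment.
 */
static inline void
init_get_port_info32(struct fw_port_cmd *c, unsigned int port)
{
	memset(c, 0, sizeof(*c));
	c->op_to_portid = htobe32(V_FW_CMD_OP(FW_PORT_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_PORT_CMD_PORTID(port));
	c->action_to_len16 = htobe32(
	    V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO32) |
	    FW_LEN16(*c));
}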
#define V_FW_PORT_CMD_TXIPG(x) ((x) << S_FW_PORT_CMD_TXIPG) #define G_FW_PORT_CMD_TXIPG(x) \ (((x) >> S_FW_PORT_CMD_TXIPG) & M_FW_PORT_CMD_TXIPG) #define S_FW_PORT_CMD_FORCE_PINFO 0 #define M_FW_PORT_CMD_FORCE_PINFO 0x1 #define V_FW_PORT_CMD_FORCE_PINFO(x) ((x) << S_FW_PORT_CMD_FORCE_PINFO) #define G_FW_PORT_CMD_FORCE_PINFO(x) \ (((x) >> S_FW_PORT_CMD_FORCE_PINFO) & M_FW_PORT_CMD_FORCE_PINFO) #define F_FW_PORT_CMD_FORCE_PINFO V_FW_PORT_CMD_FORCE_PINFO(1U) #define S_FW_PORT_CMD_LSTATUS 31 #define M_FW_PORT_CMD_LSTATUS 0x1 #define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS) #define G_FW_PORT_CMD_LSTATUS(x) \ (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS) #define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U) #define S_FW_PORT_CMD_LSPEED 24 #define M_FW_PORT_CMD_LSPEED 0x3f #define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED) #define G_FW_PORT_CMD_LSPEED(x) \ (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED) #define S_FW_PORT_CMD_TXPAUSE 23 #define M_FW_PORT_CMD_TXPAUSE 0x1 #define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE) #define G_FW_PORT_CMD_TXPAUSE(x) \ (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE) #define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U) #define S_FW_PORT_CMD_RXPAUSE 22 #define M_FW_PORT_CMD_RXPAUSE 0x1 #define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE) #define G_FW_PORT_CMD_RXPAUSE(x) \ (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE) #define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U) #define S_FW_PORT_CMD_MDIOCAP 21 #define M_FW_PORT_CMD_MDIOCAP 0x1 #define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP) #define G_FW_PORT_CMD_MDIOCAP(x) \ (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP) #define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U) #define S_FW_PORT_CMD_MDIOADDR 16 #define M_FW_PORT_CMD_MDIOADDR 0x1f #define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR) #define G_FW_PORT_CMD_MDIOADDR(x) \ (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR) #define S_FW_PORT_CMD_LPTXPAUSE 15 #define M_FW_PORT_CMD_LPTXPAUSE 0x1 #define V_FW_PORT_CMD_LPTXPAUSE(x) ((x) << S_FW_PORT_CMD_LPTXPAUSE) #define G_FW_PORT_CMD_LPTXPAUSE(x) \ (((x) >> S_FW_PORT_CMD_LPTXPAUSE) & M_FW_PORT_CMD_LPTXPAUSE) #define F_FW_PORT_CMD_LPTXPAUSE V_FW_PORT_CMD_LPTXPAUSE(1U) #define S_FW_PORT_CMD_LPRXPAUSE 14 #define M_FW_PORT_CMD_LPRXPAUSE 0x1 #define V_FW_PORT_CMD_LPRXPAUSE(x) ((x) << S_FW_PORT_CMD_LPRXPAUSE) #define G_FW_PORT_CMD_LPRXPAUSE(x) \ (((x) >> S_FW_PORT_CMD_LPRXPAUSE) & M_FW_PORT_CMD_LPRXPAUSE) #define F_FW_PORT_CMD_LPRXPAUSE V_FW_PORT_CMD_LPRXPAUSE(1U) #define S_FW_PORT_CMD_PTYPE 8 #define M_FW_PORT_CMD_PTYPE 0x1f #define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE) #define G_FW_PORT_CMD_PTYPE(x) \ (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE) #define S_FW_PORT_CMD_LINKDNRC 5 #define M_FW_PORT_CMD_LINKDNRC 0x7 #define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC) #define G_FW_PORT_CMD_LINKDNRC(x) \ (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC) #define S_FW_PORT_CMD_MODTYPE 0 #define M_FW_PORT_CMD_MODTYPE 0x1f #define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE) #define G_FW_PORT_CMD_MODTYPE(x) \ (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE) #define S_FW_PORT_AUXLINFO_KX4 2 #define M_FW_PORT_AUXLINFO_KX4 0x1 #define V_FW_PORT_AUXLINFO_KX4(x) \ ((x) << S_FW_PORT_AUXLINFO_KX4) #define G_FW_PORT_AUXLINFO_KX4(x) \ (((x) >> S_FW_PORT_AUXLINFO_KX4) & M_FW_PORT_AUXLINFO_KX4) #define F_FW_PORT_AUXLINFO_KX4 
V_FW_PORT_AUXLINFO_KX4(1U) #define S_FW_PORT_AUXLINFO_KR 1 #define M_FW_PORT_AUXLINFO_KR 0x1 #define V_FW_PORT_AUXLINFO_KR(x) \ ((x) << S_FW_PORT_AUXLINFO_KR) #define G_FW_PORT_AUXLINFO_KR(x) \ (((x) >> S_FW_PORT_AUXLINFO_KR) & M_FW_PORT_AUXLINFO_KR) #define F_FW_PORT_AUXLINFO_KR V_FW_PORT_AUXLINFO_KR(1U) #define S_FW_PORT_CMD_DCBXDIS 7 #define M_FW_PORT_CMD_DCBXDIS 0x1 #define V_FW_PORT_CMD_DCBXDIS(x) ((x) << S_FW_PORT_CMD_DCBXDIS) #define G_FW_PORT_CMD_DCBXDIS(x) \ (((x) >> S_FW_PORT_CMD_DCBXDIS) & M_FW_PORT_CMD_DCBXDIS) #define F_FW_PORT_CMD_DCBXDIS V_FW_PORT_CMD_DCBXDIS(1U) #define S_FW_PORT_CMD_APPLY 7 #define M_FW_PORT_CMD_APPLY 0x1 #define V_FW_PORT_CMD_APPLY(x) ((x) << S_FW_PORT_CMD_APPLY) #define G_FW_PORT_CMD_APPLY(x) \ (((x) >> S_FW_PORT_CMD_APPLY) & M_FW_PORT_CMD_APPLY) #define F_FW_PORT_CMD_APPLY V_FW_PORT_CMD_APPLY(1U) #define S_FW_PORT_CMD_ALL_SYNCD 7 #define M_FW_PORT_CMD_ALL_SYNCD 0x1 #define V_FW_PORT_CMD_ALL_SYNCD(x) ((x) << S_FW_PORT_CMD_ALL_SYNCD) #define G_FW_PORT_CMD_ALL_SYNCD(x) \ (((x) >> S_FW_PORT_CMD_ALL_SYNCD) & M_FW_PORT_CMD_ALL_SYNCD) #define F_FW_PORT_CMD_ALL_SYNCD V_FW_PORT_CMD_ALL_SYNCD(1U) #define S_FW_PORT_CMD_DCB_VERSION 12 #define M_FW_PORT_CMD_DCB_VERSION 0x7 #define V_FW_PORT_CMD_DCB_VERSION(x) ((x) << S_FW_PORT_CMD_DCB_VERSION) #define G_FW_PORT_CMD_DCB_VERSION(x) \ (((x) >> S_FW_PORT_CMD_DCB_VERSION) & M_FW_PORT_CMD_DCB_VERSION) #define S_FW_PORT_CMD_PFC_STATE 8 #define M_FW_PORT_CMD_PFC_STATE 0xf #define V_FW_PORT_CMD_PFC_STATE(x) ((x) << S_FW_PORT_CMD_PFC_STATE) #define G_FW_PORT_CMD_PFC_STATE(x) \ (((x) >> S_FW_PORT_CMD_PFC_STATE) & M_FW_PORT_CMD_PFC_STATE) #define S_FW_PORT_CMD_ETS_STATE 4 #define M_FW_PORT_CMD_ETS_STATE 0xf #define V_FW_PORT_CMD_ETS_STATE(x) ((x) << S_FW_PORT_CMD_ETS_STATE) #define G_FW_PORT_CMD_ETS_STATE(x) \ (((x) >> S_FW_PORT_CMD_ETS_STATE) & M_FW_PORT_CMD_ETS_STATE) #define S_FW_PORT_CMD_APP_STATE 0 #define M_FW_PORT_CMD_APP_STATE 0xf #define V_FW_PORT_CMD_APP_STATE(x) ((x) << S_FW_PORT_CMD_APP_STATE) #define G_FW_PORT_CMD_APP_STATE(x) \ (((x) >> S_FW_PORT_CMD_APP_STATE) & M_FW_PORT_CMD_APP_STATE) #define S_FW_PORT_CMD_LSTATUS32 31 #define M_FW_PORT_CMD_LSTATUS32 0x1 #define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32) #define G_FW_PORT_CMD_LSTATUS32(x) \ (((x) >> S_FW_PORT_CMD_LSTATUS32) & M_FW_PORT_CMD_LSTATUS32) #define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U) #define S_FW_PORT_CMD_LINKDNRC32 28 #define M_FW_PORT_CMD_LINKDNRC32 0x7 #define V_FW_PORT_CMD_LINKDNRC32(x) ((x) << S_FW_PORT_CMD_LINKDNRC32) #define G_FW_PORT_CMD_LINKDNRC32(x) \ (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32) #define S_FW_PORT_CMD_DCBXDIS32 27 #define M_FW_PORT_CMD_DCBXDIS32 0x1 #define V_FW_PORT_CMD_DCBXDIS32(x) ((x) << S_FW_PORT_CMD_DCBXDIS32) #define G_FW_PORT_CMD_DCBXDIS32(x) \ (((x) >> S_FW_PORT_CMD_DCBXDIS32) & M_FW_PORT_CMD_DCBXDIS32) #define F_FW_PORT_CMD_DCBXDIS32 V_FW_PORT_CMD_DCBXDIS32(1U) #define S_FW_PORT_CMD_MDIOCAP32 26 #define M_FW_PORT_CMD_MDIOCAP32 0x1 #define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32) #define G_FW_PORT_CMD_MDIOCAP32(x) \ (((x) >> S_FW_PORT_CMD_MDIOCAP32) & M_FW_PORT_CMD_MDIOCAP32) #define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U) #define S_FW_PORT_CMD_MDIOADDR32 21 #define M_FW_PORT_CMD_MDIOADDR32 0x1f #define V_FW_PORT_CMD_MDIOADDR32(x) ((x) << S_FW_PORT_CMD_MDIOADDR32) #define G_FW_PORT_CMD_MDIOADDR32(x) \ (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32) #define S_FW_PORT_CMD_PORTTYPE32 13 #define M_FW_PORT_CMD_PORTTYPE32 0xff 
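/*
 * Illustrative sketch, not part of the firmware interface: decoding link
 * state from a GET_PORT_INFO32 reply.  The link-down reason returned
 * through *dn_rc indexes the fw_port_link_dn_rc enum defined further
 * below; be32toh is assumed from the kernel environment.
 */
static inline int
port_info32_link_up(const struct fw_port_cmd *rpl, int *dn_rc)
{
	uint32_t lstatus = be32toh(rpl->u.info32.lstatus32_to_cbllen32);

	if (lstatus & F_FW_PORT_CMD_LSTATUS32)
		return (1);
	*dn_rc = G_FW_PORT_CMD_LINKDNRC32(lstatus);
	return (0);
}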
#define V_FW_PORT_CMD_PORTTYPE32(x) ((x) << S_FW_PORT_CMD_PORTTYPE32)
#define G_FW_PORT_CMD_PORTTYPE32(x) \
    (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32)

#define S_FW_PORT_CMD_MODTYPE32 8
#define M_FW_PORT_CMD_MODTYPE32 0x1f
#define V_FW_PORT_CMD_MODTYPE32(x) ((x) << S_FW_PORT_CMD_MODTYPE32)
#define G_FW_PORT_CMD_MODTYPE32(x) \
    (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32)

#define S_FW_PORT_CMD_CBLLEN32 0
#define M_FW_PORT_CMD_CBLLEN32 0xff
#define V_FW_PORT_CMD_CBLLEN32(x) ((x) << S_FW_PORT_CMD_CBLLEN32)
#define G_FW_PORT_CMD_CBLLEN32(x) \
    (((x) >> S_FW_PORT_CMD_CBLLEN32) & M_FW_PORT_CMD_CBLLEN32)

#define S_FW_PORT_CMD_AUXLINFO32 24
#define M_FW_PORT_CMD_AUXLINFO32 0xff
#define V_FW_PORT_CMD_AUXLINFO32(x) ((x) << S_FW_PORT_CMD_AUXLINFO32)
#define G_FW_PORT_CMD_AUXLINFO32(x) \
    (((x) >> S_FW_PORT_CMD_AUXLINFO32) & M_FW_PORT_CMD_AUXLINFO32)

#define S_FW_PORT_AUXLINFO32_KX4 2
#define M_FW_PORT_AUXLINFO32_KX4 0x1
#define V_FW_PORT_AUXLINFO32_KX4(x) \
    ((x) << S_FW_PORT_AUXLINFO32_KX4)
#define G_FW_PORT_AUXLINFO32_KX4(x) \
    (((x) >> S_FW_PORT_AUXLINFO32_KX4) & M_FW_PORT_AUXLINFO32_KX4)
#define F_FW_PORT_AUXLINFO32_KX4 V_FW_PORT_AUXLINFO32_KX4(1U)

#define S_FW_PORT_AUXLINFO32_KR 1
#define M_FW_PORT_AUXLINFO32_KR 0x1
#define V_FW_PORT_AUXLINFO32_KR(x) \
    ((x) << S_FW_PORT_AUXLINFO32_KR)
#define G_FW_PORT_AUXLINFO32_KR(x) \
    (((x) >> S_FW_PORT_AUXLINFO32_KR) & M_FW_PORT_AUXLINFO32_KR)
#define F_FW_PORT_AUXLINFO32_KR V_FW_PORT_AUXLINFO32_KR(1U)

#define S_FW_PORT_CMD_MTU32 0
#define M_FW_PORT_CMD_MTU32 0xffff
#define V_FW_PORT_CMD_MTU32(x) ((x) << S_FW_PORT_CMD_MTU32)
#define G_FW_PORT_CMD_MTU32(x) \
    (((x) >> S_FW_PORT_CMD_MTU32) & M_FW_PORT_CMD_MTU32)

/*
 * These are configured into the VPD and hence tools that generate
 * VPD may use this enumeration.
 * extPHY #lanes T4_I2C extI2C BP_Eq BP_ANEG Speed
 *
 * REMEMBER:
 *	Update the Common Code t4_hw.c:t4_get_port_type_description()
 *	with any new Firmware Port Technology Types!
 */
enum fw_port_type {
	FW_PORT_TYPE_FIBER_XFI = 0,	/* Y, 1, N, Y, N, N, 10G */
	FW_PORT_TYPE_FIBER_XAUI = 1,	/* Y, 4, N, Y, N, N, 10G */
	FW_PORT_TYPE_BT_SGMII = 2,	/* Y, 1, No, No, No, No, 1G/100M */
	FW_PORT_TYPE_BT_XFI = 3,	/* Y, 1, No, No, No, No, 10G/1G/100M */
	FW_PORT_TYPE_BT_XAUI = 4,	/* Y, 4, No, No, No, No, 10G/1G/100M */
	FW_PORT_TYPE_KX4 = 5,		/* No, 4, No, No, Yes, Yes, 10G */
	FW_PORT_TYPE_CX4 = 6,		/* No, 4, No, No, No, No, 10G */
	FW_PORT_TYPE_KX = 7,		/* No, 1, No, No, Yes, No, 1G */
	FW_PORT_TYPE_KR = 8,		/* No, 1, No, No, Yes, Yes, 10G */
	FW_PORT_TYPE_SFP = 9,		/* No, 1, Yes, No, No, No, 10G */
	FW_PORT_TYPE_BP_AP = 10,	/* No, 1, No, No, Yes, Yes, 10G, BP ANEG */
	FW_PORT_TYPE_BP4_AP = 11,	/* No, 4, No, No, Yes, Yes, 10G, BP ANEG */
	FW_PORT_TYPE_QSFP_10G = 12,	/* No, 1, Yes, No, No, No, 10G */
	FW_PORT_TYPE_QSA = 13,		/* No, 1, Yes, No, No, No, 10G */
	FW_PORT_TYPE_QSFP = 14,		/* No, 4, Yes, No, No, No, 40G */
	FW_PORT_TYPE_BP40_BA = 15,	/* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANEG */
	FW_PORT_TYPE_KR4_100G = 16,	/* No, 4, 100G/40G/25G, Backplane */
	FW_PORT_TYPE_CR4_QSFP = 17,	/* No, 4, 100G/40G/25G */
	FW_PORT_TYPE_CR_QSFP = 18,	/* No, 1, 25G Spider cable */
	FW_PORT_TYPE_CR2_QSFP = 19,	/* No, 2, 50G */
	FW_PORT_TYPE_SFP28 = 20,	/* No, 1, 25G/10G/1G */
	FW_PORT_TYPE_KR_SFP28 = 21,	/* No, 1, 25G/10G/1G using Backplane */
	FW_PORT_TYPE_KR_XLAUI = 22,	/* No, 4, 40G/10G/1G, No AN */
	FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
};

/* These are read from module's EEPROM and determined once the
 * module is inserted.
 */
enum fw_port_module_type {
	FW_PORT_MOD_TYPE_NA = 0x0,
	FW_PORT_MOD_TYPE_LR = 0x1,
	FW_PORT_MOD_TYPE_SR = 0x2,
	FW_PORT_MOD_TYPE_ER = 0x3,
	FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4,
	FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5,
	FW_PORT_MOD_TYPE_LRM = 0x6,
	FW_PORT_MOD_TYPE_ERROR = M_FW_PORT_CMD_MODTYPE - 3,
	FW_PORT_MOD_TYPE_UNKNOWN = M_FW_PORT_CMD_MODTYPE - 2,
	FW_PORT_MOD_TYPE_NOTSUPPORTED = M_FW_PORT_CMD_MODTYPE - 1,
	FW_PORT_MOD_TYPE_NONE = M_FW_PORT_CMD_MODTYPE
};

/* used by FW; tools may use this to generate VPD */
enum fw_port_mod_sub_type {
	FW_PORT_MOD_SUB_TYPE_NA,
	FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1,
	FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2,
	FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3,
	FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4,
	FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5,
	FW_PORT_MOD_SUB_TYPE_BCM5482 = 0x6,
	FW_PORT_MOD_SUB_TYPE_BCM84856 = 0x7,
	FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8,

	/*
	 * The following will never be in the VPD.  They are TWINAX cable
	 * lengths decoded from SFP+ module i2c PROMs.  These should almost
	 * certainly go somewhere else ...
	 */
	FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9,
	FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA,
	FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB,
	FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
};

/* link down reason codes (3b) */
enum fw_port_link_dn_rc {
	FW_PORT_LINK_DN_RC_NONE,
	FW_PORT_LINK_DN_RC_REMFLT,	/* Remote fault detected */
	FW_PORT_LINK_DN_ANEG_F,		/* Auto-negotiation fault */
	FW_PORT_LINK_DN_RESERVED3,
	FW_PORT_LINK_DN_OVERHEAT,	/* Port overheated */
	FW_PORT_LINK_DN_UNKNOWN,	/* Unable to determine reason */
	FW_PORT_LINK_DN_RX_LOS,		/* No RX signal detected */
	FW_PORT_LINK_DN_RESERVED7
};

enum fw_port_stats_tx_index {
	FW_STAT_TX_PORT_BYTES_IX = 0, FW_STAT_TX_PORT_FRAMES_IX,
	FW_STAT_TX_PORT_BCAST_IX, FW_STAT_TX_PORT_MCAST_IX,
	FW_STAT_TX_PORT_UCAST_IX, FW_STAT_TX_PORT_ERROR_IX,
	FW_STAT_TX_PORT_64B_IX, FW_STAT_TX_PORT_65B_127B_IX,
	FW_STAT_TX_PORT_128B_255B_IX, FW_STAT_TX_PORT_256B_511B_IX,
	FW_STAT_TX_PORT_512B_1023B_IX, FW_STAT_TX_PORT_1024B_1518B_IX,
	FW_STAT_TX_PORT_1519B_MAX_IX, FW_STAT_TX_PORT_DROP_IX,
	FW_STAT_TX_PORT_PAUSE_IX, FW_STAT_TX_PORT_PPP0_IX,
	FW_STAT_TX_PORT_PPP1_IX, FW_STAT_TX_PORT_PPP2_IX,
	FW_STAT_TX_PORT_PPP3_IX, FW_STAT_TX_PORT_PPP4_IX,
	FW_STAT_TX_PORT_PPP5_IX, FW_STAT_TX_PORT_PPP6_IX,
	FW_STAT_TX_PORT_PPP7_IX, FW_NUM_PORT_TX_STATS
};

enum fw_port_stat_rx_index {
	FW_STAT_RX_PORT_BYTES_IX = 0, FW_STAT_RX_PORT_FRAMES_IX,
	FW_STAT_RX_PORT_BCAST_IX, FW_STAT_RX_PORT_MCAST_IX,
	FW_STAT_RX_PORT_UCAST_IX, FW_STAT_RX_PORT_MTU_ERROR_IX,
	FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, FW_STAT_RX_PORT_CRC_ERROR_IX,
	FW_STAT_RX_PORT_LEN_ERROR_IX, FW_STAT_RX_PORT_SYM_ERROR_IX,
	FW_STAT_RX_PORT_64B_IX, FW_STAT_RX_PORT_65B_127B_IX,
	FW_STAT_RX_PORT_128B_255B_IX, FW_STAT_RX_PORT_256B_511B_IX,
	FW_STAT_RX_PORT_512B_1023B_IX, FW_STAT_RX_PORT_1024B_1518B_IX,
	FW_STAT_RX_PORT_1519B_MAX_IX, FW_STAT_RX_PORT_PAUSE_IX,
	FW_STAT_RX_PORT_PPP0_IX, FW_STAT_RX_PORT_PPP1_IX,
	FW_STAT_RX_PORT_PPP2_IX, FW_STAT_RX_PORT_PPP3_IX,
	FW_STAT_RX_PORT_PPP4_IX, FW_STAT_RX_PORT_PPP5_IX,
	FW_STAT_RX_PORT_PPP6_IX, FW_STAT_RX_PORT_PPP7_IX,
	FW_STAT_RX_PORT_LESS_64B_IX, FW_STAT_RX_PORT_MAC_ERROR_IX,
	FW_NUM_PORT_RX_STATS
};

/* port stats */
#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + \
    FW_NUM_PORT_RX_STATS)

struct fw_port_stats_cmd {
	__be32 op_to_portid;
	__be32 retval_len16;
	union fw_port_stats {
		struct fw_port_stats_ctl {
			__u8 nstats_bg_bm;
			__u8 tx_ix;
			__be16 r6;
			__be32 r7;
			__be64 stat0;
			__be64 stat1;
			__be64 stat2;
			__be64 stat3;
			__be64 stat4;
			__be64 stat5;
		} ctl;
		struct fw_port_stats_all {
			__be64 tx_bytes;
			__be64 tx_frames;
			__be64 tx_bcast;
			__be64 tx_mcast;
			__be64 tx_ucast;
			__be64 tx_error;
__be64 tx_64b; __be64 tx_65b_127b; __be64 tx_128b_255b; __be64 tx_256b_511b; __be64 tx_512b_1023b; __be64 tx_1024b_1518b; __be64 tx_1519b_max; __be64 tx_drop; __be64 tx_pause; __be64 tx_ppp0; __be64 tx_ppp1; __be64 tx_ppp2; __be64 tx_ppp3; __be64 tx_ppp4; __be64 tx_ppp5; __be64 tx_ppp6; __be64 tx_ppp7; __be64 rx_bytes; __be64 rx_frames; __be64 rx_bcast; __be64 rx_mcast; __be64 rx_ucast; __be64 rx_mtu_error; __be64 rx_mtu_crc_error; __be64 rx_crc_error; __be64 rx_len_error; __be64 rx_sym_error; __be64 rx_64b; __be64 rx_65b_127b; __be64 rx_128b_255b; __be64 rx_256b_511b; __be64 rx_512b_1023b; __be64 rx_1024b_1518b; __be64 rx_1519b_max; __be64 rx_pause; __be64 rx_ppp0; __be64 rx_ppp1; __be64 rx_ppp2; __be64 rx_ppp3; __be64 rx_ppp4; __be64 rx_ppp5; __be64 rx_ppp6; __be64 rx_ppp7; __be64 rx_less_64b; __be64 rx_bg_drop; __be64 rx_bg_trunc; } all; } u; }; #define S_FW_PORT_STATS_CMD_NSTATS 4 #define M_FW_PORT_STATS_CMD_NSTATS 0x7 #define V_FW_PORT_STATS_CMD_NSTATS(x) ((x) << S_FW_PORT_STATS_CMD_NSTATS) #define G_FW_PORT_STATS_CMD_NSTATS(x) \ (((x) >> S_FW_PORT_STATS_CMD_NSTATS) & M_FW_PORT_STATS_CMD_NSTATS) #define S_FW_PORT_STATS_CMD_BG_BM 0 #define M_FW_PORT_STATS_CMD_BG_BM 0x3 #define V_FW_PORT_STATS_CMD_BG_BM(x) ((x) << S_FW_PORT_STATS_CMD_BG_BM) #define G_FW_PORT_STATS_CMD_BG_BM(x) \ (((x) >> S_FW_PORT_STATS_CMD_BG_BM) & M_FW_PORT_STATS_CMD_BG_BM) #define S_FW_PORT_STATS_CMD_TX 7 #define M_FW_PORT_STATS_CMD_TX 0x1 #define V_FW_PORT_STATS_CMD_TX(x) ((x) << S_FW_PORT_STATS_CMD_TX) #define G_FW_PORT_STATS_CMD_TX(x) \ (((x) >> S_FW_PORT_STATS_CMD_TX) & M_FW_PORT_STATS_CMD_TX) #define F_FW_PORT_STATS_CMD_TX V_FW_PORT_STATS_CMD_TX(1U) #define S_FW_PORT_STATS_CMD_IX 0 #define M_FW_PORT_STATS_CMD_IX 0x3f #define V_FW_PORT_STATS_CMD_IX(x) ((x) << S_FW_PORT_STATS_CMD_IX) #define G_FW_PORT_STATS_CMD_IX(x) \ (((x) >> S_FW_PORT_STATS_CMD_IX) & M_FW_PORT_STATS_CMD_IX) /* port loopback stats */ #define FW_NUM_LB_STATS 14 enum fw_port_lb_stats_index { FW_STAT_LB_PORT_BYTES_IX, FW_STAT_LB_PORT_FRAMES_IX, FW_STAT_LB_PORT_BCAST_IX, FW_STAT_LB_PORT_MCAST_IX, FW_STAT_LB_PORT_UCAST_IX, FW_STAT_LB_PORT_ERROR_IX, FW_STAT_LB_PORT_64B_IX, FW_STAT_LB_PORT_65B_127B_IX, FW_STAT_LB_PORT_128B_255B_IX, FW_STAT_LB_PORT_256B_511B_IX, FW_STAT_LB_PORT_512B_1023B_IX, FW_STAT_LB_PORT_1024B_1518B_IX, FW_STAT_LB_PORT_1519B_MAX_IX, FW_STAT_LB_PORT_DROP_FRAMES_IX }; struct fw_port_lb_stats_cmd { __be32 op_to_lbport; __be32 retval_len16; union fw_port_lb_stats { struct fw_port_lb_stats_ctl { __u8 nstats_bg_bm; __u8 ix_pkd; __be16 r6; __be32 r7; __be64 stat0; __be64 stat1; __be64 stat2; __be64 stat3; __be64 stat4; __be64 stat5; } ctl; struct fw_port_lb_stats_all { __be64 tx_bytes; __be64 tx_frames; __be64 tx_bcast; __be64 tx_mcast; __be64 tx_ucast; __be64 tx_error; __be64 tx_64b; __be64 tx_65b_127b; __be64 tx_128b_255b; __be64 tx_256b_511b; __be64 tx_512b_1023b; __be64 tx_1024b_1518b; __be64 tx_1519b_max; __be64 rx_lb_drop; __be64 rx_lb_trunc; } all; } u; }; #define S_FW_PORT_LB_STATS_CMD_LBPORT 0 #define M_FW_PORT_LB_STATS_CMD_LBPORT 0xf #define V_FW_PORT_LB_STATS_CMD_LBPORT(x) \ ((x) << S_FW_PORT_LB_STATS_CMD_LBPORT) #define G_FW_PORT_LB_STATS_CMD_LBPORT(x) \ (((x) >> S_FW_PORT_LB_STATS_CMD_LBPORT) & M_FW_PORT_LB_STATS_CMD_LBPORT) #define S_FW_PORT_LB_STATS_CMD_NSTATS 4 #define M_FW_PORT_LB_STATS_CMD_NSTATS 0x7 #define V_FW_PORT_LB_STATS_CMD_NSTATS(x) \ ((x) << S_FW_PORT_LB_STATS_CMD_NSTATS) #define G_FW_PORT_LB_STATS_CMD_NSTATS(x) \ (((x) >> S_FW_PORT_LB_STATS_CMD_NSTATS) & M_FW_PORT_LB_STATS_CMD_NSTATS) #define 
S_FW_PORT_LB_STATS_CMD_BG_BM 0 #define M_FW_PORT_LB_STATS_CMD_BG_BM 0x3 #define V_FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << S_FW_PORT_LB_STATS_CMD_BG_BM) #define G_FW_PORT_LB_STATS_CMD_BG_BM(x) \ (((x) >> S_FW_PORT_LB_STATS_CMD_BG_BM) & M_FW_PORT_LB_STATS_CMD_BG_BM) #define S_FW_PORT_LB_STATS_CMD_IX 0 #define M_FW_PORT_LB_STATS_CMD_IX 0xf #define V_FW_PORT_LB_STATS_CMD_IX(x) ((x) << S_FW_PORT_LB_STATS_CMD_IX) #define G_FW_PORT_LB_STATS_CMD_IX(x) \ (((x) >> S_FW_PORT_LB_STATS_CMD_IX) & M_FW_PORT_LB_STATS_CMD_IX) /* Trace related defines */ #define FW_TRACE_CAPTURE_MAX_SINGLE_FLT_MODE 10240 #define FW_TRACE_CAPTURE_MAX_MULTI_FLT_MODE 2560 struct fw_port_trace_cmd { __be32 op_to_portid; __be32 retval_len16; __be16 traceen_to_pciech; __be16 qnum; __be32 r5; }; #define S_FW_PORT_TRACE_CMD_PORTID 0 #define M_FW_PORT_TRACE_CMD_PORTID 0xf #define V_FW_PORT_TRACE_CMD_PORTID(x) ((x) << S_FW_PORT_TRACE_CMD_PORTID) #define G_FW_PORT_TRACE_CMD_PORTID(x) \ (((x) >> S_FW_PORT_TRACE_CMD_PORTID) & M_FW_PORT_TRACE_CMD_PORTID) #define S_FW_PORT_TRACE_CMD_TRACEEN 15 #define M_FW_PORT_TRACE_CMD_TRACEEN 0x1 #define V_FW_PORT_TRACE_CMD_TRACEEN(x) ((x) << S_FW_PORT_TRACE_CMD_TRACEEN) #define G_FW_PORT_TRACE_CMD_TRACEEN(x) \ (((x) >> S_FW_PORT_TRACE_CMD_TRACEEN) & M_FW_PORT_TRACE_CMD_TRACEEN) #define F_FW_PORT_TRACE_CMD_TRACEEN V_FW_PORT_TRACE_CMD_TRACEEN(1U) #define S_FW_PORT_TRACE_CMD_FLTMODE 14 #define M_FW_PORT_TRACE_CMD_FLTMODE 0x1 #define V_FW_PORT_TRACE_CMD_FLTMODE(x) ((x) << S_FW_PORT_TRACE_CMD_FLTMODE) #define G_FW_PORT_TRACE_CMD_FLTMODE(x) \ (((x) >> S_FW_PORT_TRACE_CMD_FLTMODE) & M_FW_PORT_TRACE_CMD_FLTMODE) #define F_FW_PORT_TRACE_CMD_FLTMODE V_FW_PORT_TRACE_CMD_FLTMODE(1U) #define S_FW_PORT_TRACE_CMD_DUPLEN 13 #define M_FW_PORT_TRACE_CMD_DUPLEN 0x1 #define V_FW_PORT_TRACE_CMD_DUPLEN(x) ((x) << S_FW_PORT_TRACE_CMD_DUPLEN) #define G_FW_PORT_TRACE_CMD_DUPLEN(x) \ (((x) >> S_FW_PORT_TRACE_CMD_DUPLEN) & M_FW_PORT_TRACE_CMD_DUPLEN) #define F_FW_PORT_TRACE_CMD_DUPLEN V_FW_PORT_TRACE_CMD_DUPLEN(1U) #define S_FW_PORT_TRACE_CMD_RUNTFLTSIZE 8 #define M_FW_PORT_TRACE_CMD_RUNTFLTSIZE 0x1f #define V_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x) \ ((x) << S_FW_PORT_TRACE_CMD_RUNTFLTSIZE) #define G_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x) \ (((x) >> S_FW_PORT_TRACE_CMD_RUNTFLTSIZE) & \ M_FW_PORT_TRACE_CMD_RUNTFLTSIZE) #define S_FW_PORT_TRACE_CMD_PCIECH 6 #define M_FW_PORT_TRACE_CMD_PCIECH 0x3 #define V_FW_PORT_TRACE_CMD_PCIECH(x) ((x) << S_FW_PORT_TRACE_CMD_PCIECH) #define G_FW_PORT_TRACE_CMD_PCIECH(x) \ (((x) >> S_FW_PORT_TRACE_CMD_PCIECH) & M_FW_PORT_TRACE_CMD_PCIECH) struct fw_port_trace_mmap_cmd { __be32 op_to_portid; __be32 retval_len16; __be32 fid_to_skipoffset; __be32 minpktsize_capturemax; __u8 map[224]; }; #define S_FW_PORT_TRACE_MMAP_CMD_PORTID 0 #define M_FW_PORT_TRACE_MMAP_CMD_PORTID 0xf #define V_FW_PORT_TRACE_MMAP_CMD_PORTID(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_PORTID) #define G_FW_PORT_TRACE_MMAP_CMD_PORTID(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_PORTID) & \ M_FW_PORT_TRACE_MMAP_CMD_PORTID) #define S_FW_PORT_TRACE_MMAP_CMD_FID 30 #define M_FW_PORT_TRACE_MMAP_CMD_FID 0x3 #define V_FW_PORT_TRACE_MMAP_CMD_FID(x) ((x) << S_FW_PORT_TRACE_MMAP_CMD_FID) #define G_FW_PORT_TRACE_MMAP_CMD_FID(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_FID) & M_FW_PORT_TRACE_MMAP_CMD_FID) #define S_FW_PORT_TRACE_MMAP_CMD_MMAPEN 29 #define M_FW_PORT_TRACE_MMAP_CMD_MMAPEN 0x1 #define V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_MMAPEN) #define G_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_MMAPEN) & \ 
M_FW_PORT_TRACE_MMAP_CMD_MMAPEN) #define F_FW_PORT_TRACE_MMAP_CMD_MMAPEN V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(1U) #define S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN 28 #define M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN 0x1 #define V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN) #define G_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN) & \ M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN) #define F_FW_PORT_TRACE_MMAP_CMD_DCMAPEN V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(1U) #define S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH 8 #define M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH 0x1f #define V_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH) #define G_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH) & \ M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH) #define S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET 0 #define M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET 0x1f #define V_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET) #define G_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET) & \ M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET) #define S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE 18 #define M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE 0x3fff #define V_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE) #define G_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE) & \ M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE) #define S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX 0 #define M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX 0x3fff #define V_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX) #define G_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX) & \ M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX) enum fw_ptp_subop { /* none */ FW_PTP_SC_INIT_TIMER = 0x00, FW_PTP_SC_TX_TYPE = 0x01, /* init */ FW_PTP_SC_RXTIME_STAMP = 0x08, FW_PTP_SC_RDRX_TYPE = 0x09, /* ts */ FW_PTP_SC_ADJ_FREQ = 0x10, FW_PTP_SC_ADJ_TIME = 0x11, FW_PTP_SC_ADJ_FTIME = 0x12, FW_PTP_SC_WALL_CLOCK = 0x13, FW_PTP_SC_GET_TIME = 0x14, FW_PTP_SC_SET_TIME = 0x15, }; struct fw_ptp_cmd { __be32 op_to_portid; __be32 retval_len16; union fw_ptp { struct fw_ptp_sc { __u8 sc; __u8 r3[7]; } scmd; struct fw_ptp_init { __u8 sc; __u8 txchan; __be16 absid; __be16 mode; __be16 r3; } init; struct fw_ptp_ts { __u8 sc; __u8 sign; __be16 r3; __be32 ppb; __be64 tm; } ts; } u; __be64 r3; }; #define S_FW_PTP_CMD_PORTID 0 #define M_FW_PTP_CMD_PORTID 0xf #define V_FW_PTP_CMD_PORTID(x) ((x) << S_FW_PTP_CMD_PORTID) #define G_FW_PTP_CMD_PORTID(x) \ (((x) >> S_FW_PTP_CMD_PORTID) & M_FW_PTP_CMD_PORTID) struct fw_rss_ind_tbl_cmd { __be32 op_to_viid; __be32 retval_len16; __be16 niqid; __be16 startidx; __be32 r3; __be32 iq0_to_iq2; __be32 iq3_to_iq5; __be32 iq6_to_iq8; __be32 iq9_to_iq11; __be32 iq12_to_iq14; __be32 iq15_to_iq17; __be32 iq18_to_iq20; __be32 iq21_to_iq23; __be32 iq24_to_iq26; __be32 iq27_to_iq29; __be32 iq30_iq31; __be32 r15_lo; }; #define S_FW_RSS_IND_TBL_CMD_VIID 0 #define M_FW_RSS_IND_TBL_CMD_VIID 0xfff #define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID) #define G_FW_RSS_IND_TBL_CMD_VIID(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID) #define S_FW_RSS_IND_TBL_CMD_IQ0 20 #define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0) #define G_FW_RSS_IND_TBL_CMD_IQ0(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0) #define S_FW_RSS_IND_TBL_CMD_IQ1 10 #define 
M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1) #define G_FW_RSS_IND_TBL_CMD_IQ1(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1) #define S_FW_RSS_IND_TBL_CMD_IQ2 0 #define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2) #define G_FW_RSS_IND_TBL_CMD_IQ2(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2) #define S_FW_RSS_IND_TBL_CMD_IQ3 20 #define M_FW_RSS_IND_TBL_CMD_IQ3 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ3(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ3) #define G_FW_RSS_IND_TBL_CMD_IQ3(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ3) & M_FW_RSS_IND_TBL_CMD_IQ3) #define S_FW_RSS_IND_TBL_CMD_IQ4 10 #define M_FW_RSS_IND_TBL_CMD_IQ4 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ4(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ4) #define G_FW_RSS_IND_TBL_CMD_IQ4(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ4) & M_FW_RSS_IND_TBL_CMD_IQ4) #define S_FW_RSS_IND_TBL_CMD_IQ5 0 #define M_FW_RSS_IND_TBL_CMD_IQ5 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ5(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ5) #define G_FW_RSS_IND_TBL_CMD_IQ5(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ5) & M_FW_RSS_IND_TBL_CMD_IQ5) #define S_FW_RSS_IND_TBL_CMD_IQ6 20 #define M_FW_RSS_IND_TBL_CMD_IQ6 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ6(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ6) #define G_FW_RSS_IND_TBL_CMD_IQ6(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ6) & M_FW_RSS_IND_TBL_CMD_IQ6) #define S_FW_RSS_IND_TBL_CMD_IQ7 10 #define M_FW_RSS_IND_TBL_CMD_IQ7 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ7(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ7) #define G_FW_RSS_IND_TBL_CMD_IQ7(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ7) & M_FW_RSS_IND_TBL_CMD_IQ7) #define S_FW_RSS_IND_TBL_CMD_IQ8 0 #define M_FW_RSS_IND_TBL_CMD_IQ8 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ8(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ8) #define G_FW_RSS_IND_TBL_CMD_IQ8(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ8) & M_FW_RSS_IND_TBL_CMD_IQ8) #define S_FW_RSS_IND_TBL_CMD_IQ9 20 #define M_FW_RSS_IND_TBL_CMD_IQ9 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ9(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ9) #define G_FW_RSS_IND_TBL_CMD_IQ9(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ9) & M_FW_RSS_IND_TBL_CMD_IQ9) #define S_FW_RSS_IND_TBL_CMD_IQ10 10 #define M_FW_RSS_IND_TBL_CMD_IQ10 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ10(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ10) #define G_FW_RSS_IND_TBL_CMD_IQ10(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ10) & M_FW_RSS_IND_TBL_CMD_IQ10) #define S_FW_RSS_IND_TBL_CMD_IQ11 0 #define M_FW_RSS_IND_TBL_CMD_IQ11 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ11(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ11) #define G_FW_RSS_IND_TBL_CMD_IQ11(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ11) & M_FW_RSS_IND_TBL_CMD_IQ11) #define S_FW_RSS_IND_TBL_CMD_IQ12 20 #define M_FW_RSS_IND_TBL_CMD_IQ12 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ12(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ12) #define G_FW_RSS_IND_TBL_CMD_IQ12(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ12) & M_FW_RSS_IND_TBL_CMD_IQ12) #define S_FW_RSS_IND_TBL_CMD_IQ13 10 #define M_FW_RSS_IND_TBL_CMD_IQ13 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ13(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ13) #define G_FW_RSS_IND_TBL_CMD_IQ13(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ13) & M_FW_RSS_IND_TBL_CMD_IQ13) #define S_FW_RSS_IND_TBL_CMD_IQ14 0 #define M_FW_RSS_IND_TBL_CMD_IQ14 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ14(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ14) #define G_FW_RSS_IND_TBL_CMD_IQ14(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ14) & M_FW_RSS_IND_TBL_CMD_IQ14) #define S_FW_RSS_IND_TBL_CMD_IQ15 20 #define M_FW_RSS_IND_TBL_CMD_IQ15 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ15(x) ((x) << 
S_FW_RSS_IND_TBL_CMD_IQ15) #define G_FW_RSS_IND_TBL_CMD_IQ15(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ15) & M_FW_RSS_IND_TBL_CMD_IQ15) #define S_FW_RSS_IND_TBL_CMD_IQ16 10 #define M_FW_RSS_IND_TBL_CMD_IQ16 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ16(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ16) #define G_FW_RSS_IND_TBL_CMD_IQ16(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ16) & M_FW_RSS_IND_TBL_CMD_IQ16) #define S_FW_RSS_IND_TBL_CMD_IQ17 0 #define M_FW_RSS_IND_TBL_CMD_IQ17 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ17(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ17) #define G_FW_RSS_IND_TBL_CMD_IQ17(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ17) & M_FW_RSS_IND_TBL_CMD_IQ17) #define S_FW_RSS_IND_TBL_CMD_IQ18 20 #define M_FW_RSS_IND_TBL_CMD_IQ18 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ18(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ18) #define G_FW_RSS_IND_TBL_CMD_IQ18(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ18) & M_FW_RSS_IND_TBL_CMD_IQ18) #define S_FW_RSS_IND_TBL_CMD_IQ19 10 #define M_FW_RSS_IND_TBL_CMD_IQ19 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ19(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ19) #define G_FW_RSS_IND_TBL_CMD_IQ19(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ19) & M_FW_RSS_IND_TBL_CMD_IQ19) #define S_FW_RSS_IND_TBL_CMD_IQ20 0 #define M_FW_RSS_IND_TBL_CMD_IQ20 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ20(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ20) #define G_FW_RSS_IND_TBL_CMD_IQ20(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ20) & M_FW_RSS_IND_TBL_CMD_IQ20) #define S_FW_RSS_IND_TBL_CMD_IQ21 20 #define M_FW_RSS_IND_TBL_CMD_IQ21 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ21(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ21) #define G_FW_RSS_IND_TBL_CMD_IQ21(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ21) & M_FW_RSS_IND_TBL_CMD_IQ21) #define S_FW_RSS_IND_TBL_CMD_IQ22 10 #define M_FW_RSS_IND_TBL_CMD_IQ22 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ22(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ22) #define G_FW_RSS_IND_TBL_CMD_IQ22(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ22) & M_FW_RSS_IND_TBL_CMD_IQ22) #define S_FW_RSS_IND_TBL_CMD_IQ23 0 #define M_FW_RSS_IND_TBL_CMD_IQ23 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ23(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ23) #define G_FW_RSS_IND_TBL_CMD_IQ23(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ23) & M_FW_RSS_IND_TBL_CMD_IQ23) #define S_FW_RSS_IND_TBL_CMD_IQ24 20 #define M_FW_RSS_IND_TBL_CMD_IQ24 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ24(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ24) #define G_FW_RSS_IND_TBL_CMD_IQ24(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ24) & M_FW_RSS_IND_TBL_CMD_IQ24) #define S_FW_RSS_IND_TBL_CMD_IQ25 10 #define M_FW_RSS_IND_TBL_CMD_IQ25 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ25(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ25) #define G_FW_RSS_IND_TBL_CMD_IQ25(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ25) & M_FW_RSS_IND_TBL_CMD_IQ25) #define S_FW_RSS_IND_TBL_CMD_IQ26 0 #define M_FW_RSS_IND_TBL_CMD_IQ26 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ26(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ26) #define G_FW_RSS_IND_TBL_CMD_IQ26(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ26) & M_FW_RSS_IND_TBL_CMD_IQ26) #define S_FW_RSS_IND_TBL_CMD_IQ27 20 #define M_FW_RSS_IND_TBL_CMD_IQ27 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ27(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ27) #define G_FW_RSS_IND_TBL_CMD_IQ27(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ27) & M_FW_RSS_IND_TBL_CMD_IQ27) #define S_FW_RSS_IND_TBL_CMD_IQ28 10 #define M_FW_RSS_IND_TBL_CMD_IQ28 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ28(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ28) #define G_FW_RSS_IND_TBL_CMD_IQ28(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ28) & M_FW_RSS_IND_TBL_CMD_IQ28) #define S_FW_RSS_IND_TBL_CMD_IQ29 0 #define M_FW_RSS_IND_TBL_CMD_IQ29 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ29(x) ((x) << 
S_FW_RSS_IND_TBL_CMD_IQ29) #define G_FW_RSS_IND_TBL_CMD_IQ29(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ29) & M_FW_RSS_IND_TBL_CMD_IQ29) #define S_FW_RSS_IND_TBL_CMD_IQ30 20 #define M_FW_RSS_IND_TBL_CMD_IQ30 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ30(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ30) #define G_FW_RSS_IND_TBL_CMD_IQ30(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ30) & M_FW_RSS_IND_TBL_CMD_IQ30) #define S_FW_RSS_IND_TBL_CMD_IQ31 10 #define M_FW_RSS_IND_TBL_CMD_IQ31 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ31(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ31) #define G_FW_RSS_IND_TBL_CMD_IQ31(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ31) & M_FW_RSS_IND_TBL_CMD_IQ31) struct fw_rss_glb_config_cmd { __be32 op_to_write; __be32 retval_len16; union fw_rss_glb_config { struct fw_rss_glb_config_manual { __be32 mode_pkd; __be32 r3; __be64 r4; __be64 r5; } manual; struct fw_rss_glb_config_basicvirtual { __be32 mode_keymode; __be32 synmapen_to_hashtoeplitz; __be64 r8; __be64 r9; } basicvirtual; } u; }; #define S_FW_RSS_GLB_CONFIG_CMD_MODE 28 #define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf #define V_FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << S_FW_RSS_GLB_CONFIG_CMD_MODE) #define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE) #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 #define FW_RSS_GLB_CONFIG_CMD_MODE_MAX 1 #define S_FW_RSS_GLB_CONFIG_CMD_KEYMODE 26 #define M_FW_RSS_GLB_CONFIG_CMD_KEYMODE 0x3 #define V_FW_RSS_GLB_CONFIG_CMD_KEYMODE(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_KEYMODE) #define G_FW_RSS_GLB_CONFIG_CMD_KEYMODE(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_KEYMODE) & \ M_FW_RSS_GLB_CONFIG_CMD_KEYMODE) #define FW_RSS_GLB_CONFIG_CMD_KEYMODE_GLBKEY 0 #define FW_RSS_GLB_CONFIG_CMD_KEYMODE_GLBVF_KEY 1 #define FW_RSS_GLB_CONFIG_CMD_KEYMODE_PFVF_KEY 2 #define FW_RSS_GLB_CONFIG_CMD_KEYMODE_IDXVF_KEY 3 #define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8 #define M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) #define G_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) & \ M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) #define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U) #define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7 #define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) #define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) & \ M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) #define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U) #define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6 #define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) #define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) & \ M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) #define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U) #define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5 #define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) #define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) & \ M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) #define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U) #define 
S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4 #define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) #define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) & \ M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) #define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U) #define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3 #define M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) #define G_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) & \ M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) #define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U) #define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2 #define M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) #define G_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) & \ M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) #define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U) #define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1 #define M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) #define G_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) & \ M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) #define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \ V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U) #define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0 #define M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) #define G_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) & \ M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) #define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \ V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U) struct fw_rss_vi_config_cmd { __be32 op_to_viid; __be32 retval_len16; union fw_rss_vi_config { struct fw_rss_vi_config_manual { __be64 r3; __be64 r4; __be64 r5; } manual; struct fw_rss_vi_config_basicvirtual { __be32 r6; __be32 defaultq_to_udpen; __be32 secretkeyidx_pkd; __be32 secretkeyxor; __be64 r10; } basicvirtual; } u; }; #define S_FW_RSS_VI_CONFIG_CMD_VIID 0 #define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff #define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID) #define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID) #define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16 #define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff #define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) #define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \ M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) #define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4 #define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) #define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \ M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) #define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \ V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3 #define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) #define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ (((x) >> 
S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \ M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) #define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \ V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2 #define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) #define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \ M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) #define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \ V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1 #define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) #define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \ M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) #define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \ V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0 #define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN) #define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN) #define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX 0 #define M_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX 0xf #define V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX) #define G_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX) & \ M_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX) enum fw_sched_sc { FW_SCHED_SC_CONFIG = 0, FW_SCHED_SC_PARAMS = 1, }; enum fw_sched_type { FW_SCHED_TYPE_PKTSCHED = 0, FW_SCHED_TYPE_STREAMSCHED = 1, }; enum fw_sched_params_level { FW_SCHED_PARAMS_LEVEL_CL_RL = 0, FW_SCHED_PARAMS_LEVEL_CL_WRR = 1, FW_SCHED_PARAMS_LEVEL_CH_RL = 2, }; enum fw_sched_params_mode { FW_SCHED_PARAMS_MODE_CLASS = 0, FW_SCHED_PARAMS_MODE_FLOW = 1, }; enum fw_sched_params_unit { FW_SCHED_PARAMS_UNIT_BITRATE = 0, FW_SCHED_PARAMS_UNIT_PKTRATE = 1, }; enum fw_sched_params_rate { FW_SCHED_PARAMS_RATE_REL = 0, FW_SCHED_PARAMS_RATE_ABS = 1, }; struct fw_sched_cmd { __be32 op_to_write; __be32 retval_len16; union fw_sched { struct fw_sched_config { __u8 sc; __u8 type; __u8 minmaxen; __u8 r3[5]; __u8 nclasses[4]; __be32 r4; } config; struct fw_sched_params { __u8 sc; __u8 type; __u8 level; __u8 mode; __u8 unit; __u8 rate; __u8 ch; __u8 cl; __be32 min; __be32 max; __be16 weight; __be16 pktsize; __be16 burstsize; __be16 r4; } params; } u; }; /* * length of the formatting string */ #define FW_DEVLOG_FMT_LEN 192 /* * maximum number of the formatting string parameters */ #define FW_DEVLOG_FMT_PARAMS_NUM 8 /* * priority levels */ enum fw_devlog_level { FW_DEVLOG_LEVEL_EMERG = 0x0, FW_DEVLOG_LEVEL_CRIT = 0x1, FW_DEVLOG_LEVEL_ERR = 0x2, FW_DEVLOG_LEVEL_NOTICE = 0x3, FW_DEVLOG_LEVEL_INFO = 0x4, FW_DEVLOG_LEVEL_DEBUG = 0x5, FW_DEVLOG_LEVEL_MAX = 0x5, }; /* * facilities that may send a log message */ enum fw_devlog_facility { FW_DEVLOG_FACILITY_CORE = 0x00, FW_DEVLOG_FACILITY_CF = 0x01, FW_DEVLOG_FACILITY_SCHED = 0x02, FW_DEVLOG_FACILITY_TIMER = 0x04, FW_DEVLOG_FACILITY_RES = 0x06, FW_DEVLOG_FACILITY_HW = 0x08, FW_DEVLOG_FACILITY_FLR = 0x10, FW_DEVLOG_FACILITY_DMAQ = 0x12, FW_DEVLOG_FACILITY_PHY = 0x14, FW_DEVLOG_FACILITY_MAC = 0x16, FW_DEVLOG_FACILITY_PORT = 0x18, FW_DEVLOG_FACILITY_VI = 0x1A, FW_DEVLOG_FACILITY_FILTER = 0x1C, FW_DEVLOG_FACILITY_ACL = 0x1E, FW_DEVLOG_FACILITY_TM 
= 0x20, FW_DEVLOG_FACILITY_QFC = 0x22, FW_DEVLOG_FACILITY_DCB = 0x24, FW_DEVLOG_FACILITY_ETH = 0x26, FW_DEVLOG_FACILITY_OFLD = 0x28, FW_DEVLOG_FACILITY_RI = 0x2A, FW_DEVLOG_FACILITY_ISCSI = 0x2C, FW_DEVLOG_FACILITY_FCOE = 0x2E, FW_DEVLOG_FACILITY_FOISCSI = 0x30, FW_DEVLOG_FACILITY_FOFCOE = 0x32, FW_DEVLOG_FACILITY_CHNET = 0x34, FW_DEVLOG_FACILITY_COISCSI = 0x36, FW_DEVLOG_FACILITY_MAX = 0x38, }; /* * log message format */ struct fw_devlog_e { __be64 timestamp; __be32 seqno; __be16 reserved1; __u8 level; __u8 facility; __u8 fmt[FW_DEVLOG_FMT_LEN]; __be32 params[FW_DEVLOG_FMT_PARAMS_NUM]; __be32 reserved3[4]; }; struct fw_devlog_cmd { __be32 op_to_write; __be32 retval_len16; __u8 level; __u8 r2[7]; __be32 memtype_devlog_memaddr16_devlog; __be32 memsize_devlog; __be32 r3[2]; }; #define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 28 #define M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 0xf #define V_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x) \ ((x) << S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG) #define G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x) \ (((x) >> S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG) & M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG) #define S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG 0 #define M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG 0xfffffff #define V_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x) \ ((x) << S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) #define G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x) \ (((x) >> S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) & \ M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) enum fw_watchdog_actions { FW_WATCHDOG_ACTION_SHUTDOWN = 0, FW_WATCHDOG_ACTION_FLR = 1, FW_WATCHDOG_ACTION_BYPASS = 2, FW_WATCHDOG_ACTION_TMPCHK = 3, FW_WATCHDOG_ACTION_PAUSEOFF = 4, FW_WATCHDOG_ACTION_MAX = 5, }; #define FW_WATCHDOG_MAX_TIMEOUT_SECS 60 struct fw_watchdog_cmd { __be32 op_to_vfn; __be32 retval_len16; __be32 timeout; __be32 action; }; #define S_FW_WATCHDOG_CMD_PFN 8 #define M_FW_WATCHDOG_CMD_PFN 0x7 #define V_FW_WATCHDOG_CMD_PFN(x) ((x) << S_FW_WATCHDOG_CMD_PFN) #define G_FW_WATCHDOG_CMD_PFN(x) \ (((x) >> S_FW_WATCHDOG_CMD_PFN) & M_FW_WATCHDOG_CMD_PFN) #define S_FW_WATCHDOG_CMD_VFN 0 #define M_FW_WATCHDOG_CMD_VFN 0xff #define V_FW_WATCHDOG_CMD_VFN(x) ((x) << S_FW_WATCHDOG_CMD_VFN) #define G_FW_WATCHDOG_CMD_VFN(x) \ (((x) >> S_FW_WATCHDOG_CMD_VFN) & M_FW_WATCHDOG_CMD_VFN) struct fw_clip_cmd { __be32 op_to_write; __be32 alloc_to_len16; __be64 ip_hi; __be64 ip_lo; __be32 r4[2]; }; #define S_FW_CLIP_CMD_ALLOC 31 #define M_FW_CLIP_CMD_ALLOC 0x1 #define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC) #define G_FW_CLIP_CMD_ALLOC(x) \ (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC) #define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U) #define S_FW_CLIP_CMD_FREE 30 #define M_FW_CLIP_CMD_FREE 0x1 #define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE) #define G_FW_CLIP_CMD_FREE(x) \ (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE) #define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U) #define S_FW_CLIP_CMD_INDEX 16 #define M_FW_CLIP_CMD_INDEX 0x1fff #define V_FW_CLIP_CMD_INDEX(x) ((x) << S_FW_CLIP_CMD_INDEX) #define G_FW_CLIP_CMD_INDEX(x) \ (((x) >> S_FW_CLIP_CMD_INDEX) & M_FW_CLIP_CMD_INDEX) /****************************************************************************** * F O i S C S I C O M M A N D s **************************************/ #define FW_CHNET_IFACE_ADDR_MAX 3 enum fw_chnet_iface_cmd_subop { FW_CHNET_IFACE_CMD_SUBOP_NOOP = 0, FW_CHNET_IFACE_CMD_SUBOP_LINK_UP, FW_CHNET_IFACE_CMD_SUBOP_LINK_DOWN, FW_CHNET_IFACE_CMD_SUBOP_MTU_SET, FW_CHNET_IFACE_CMD_SUBOP_MTU_GET, FW_CHNET_IFACE_CMD_SUBOP_MAX, }; struct fw_chnet_iface_cmd { __be32 op_to_portid; __be32 retval_len16; __u8 subop; __u8 r2[2]; __u8 flags; __be32 
ifid_ifstate; __be16 mtu; __be16 vlanid; __be32 r3; __be16 r4; __u8 mac[6]; }; #define S_FW_CHNET_IFACE_CMD_PORTID 0 #define M_FW_CHNET_IFACE_CMD_PORTID 0xf #define V_FW_CHNET_IFACE_CMD_PORTID(x) ((x) << S_FW_CHNET_IFACE_CMD_PORTID) #define G_FW_CHNET_IFACE_CMD_PORTID(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_PORTID) & M_FW_CHNET_IFACE_CMD_PORTID) #define S_FW_CHNET_IFACE_CMD_RSS_IQID 16 #define M_FW_CHNET_IFACE_CMD_RSS_IQID 0xffff #define V_FW_CHNET_IFACE_CMD_RSS_IQID(x) \ ((x) << S_FW_CHNET_IFACE_CMD_RSS_IQID) #define G_FW_CHNET_IFACE_CMD_RSS_IQID(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_RSS_IQID) & M_FW_CHNET_IFACE_CMD_RSS_IQID) #define S_FW_CHNET_IFACE_CMD_RSS_IQID_F 0 #define M_FW_CHNET_IFACE_CMD_RSS_IQID_F 0x1 #define V_FW_CHNET_IFACE_CMD_RSS_IQID_F(x) \ ((x) << S_FW_CHNET_IFACE_CMD_RSS_IQID_F) #define G_FW_CHNET_IFACE_CMD_RSS_IQID_F(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_RSS_IQID_F) & \ M_FW_CHNET_IFACE_CMD_RSS_IQID_F) #define F_FW_CHNET_IFACE_CMD_RSS_IQID_F V_FW_CHNET_IFACE_CMD_RSS_IQID_F(1U) #define S_FW_CHNET_IFACE_CMD_IFID 8 #define M_FW_CHNET_IFACE_CMD_IFID 0xffffff #define V_FW_CHNET_IFACE_CMD_IFID(x) ((x) << S_FW_CHNET_IFACE_CMD_IFID) #define G_FW_CHNET_IFACE_CMD_IFID(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_IFID) & M_FW_CHNET_IFACE_CMD_IFID) #define S_FW_CHNET_IFACE_CMD_IFSTATE 0 #define M_FW_CHNET_IFACE_CMD_IFSTATE 0xff #define V_FW_CHNET_IFACE_CMD_IFSTATE(x) ((x) << S_FW_CHNET_IFACE_CMD_IFSTATE) #define G_FW_CHNET_IFACE_CMD_IFSTATE(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_IFSTATE) & M_FW_CHNET_IFACE_CMD_IFSTATE) struct fw_fcoe_res_info_cmd { __be32 op_to_read; __be32 retval_len16; __be16 e_d_tov; __be16 r_a_tov_seq; __be16 r_a_tov_els; __be16 r_r_tov; __be32 max_xchgs; __be32 max_ssns; __be32 used_xchgs; __be32 used_ssns; __be32 max_fcfs; __be32 max_vnps; __be32 used_fcfs; __be32 used_vnps; }; struct fw_fcoe_link_cmd { __be32 op_to_portid; __be32 retval_len16; __be32 sub_opcode_fcfi; __u8 r3; __u8 lstatus; __be16 flags; __u8 r4; __u8 set_vlan; __be16 vlan_id; __be32 vnpi_pkd; __be16 r6; __u8 phy_mac[6]; __u8 vnport_wwnn[8]; __u8 vnport_wwpn[8]; }; #define S_FW_FCOE_LINK_CMD_PORTID 0 #define M_FW_FCOE_LINK_CMD_PORTID 0xf #define V_FW_FCOE_LINK_CMD_PORTID(x) ((x) << S_FW_FCOE_LINK_CMD_PORTID) #define G_FW_FCOE_LINK_CMD_PORTID(x) \ (((x) >> S_FW_FCOE_LINK_CMD_PORTID) & M_FW_FCOE_LINK_CMD_PORTID) #define S_FW_FCOE_LINK_CMD_SUB_OPCODE 24 #define M_FW_FCOE_LINK_CMD_SUB_OPCODE 0xff #define V_FW_FCOE_LINK_CMD_SUB_OPCODE(x) \ ((x) << S_FW_FCOE_LINK_CMD_SUB_OPCODE) #define G_FW_FCOE_LINK_CMD_SUB_OPCODE(x) \ (((x) >> S_FW_FCOE_LINK_CMD_SUB_OPCODE) & M_FW_FCOE_LINK_CMD_SUB_OPCODE) #define S_FW_FCOE_LINK_CMD_FCFI 0 #define M_FW_FCOE_LINK_CMD_FCFI 0xffffff #define V_FW_FCOE_LINK_CMD_FCFI(x) ((x) << S_FW_FCOE_LINK_CMD_FCFI) #define G_FW_FCOE_LINK_CMD_FCFI(x) \ (((x) >> S_FW_FCOE_LINK_CMD_FCFI) & M_FW_FCOE_LINK_CMD_FCFI) #define S_FW_FCOE_LINK_CMD_VNPI 0 #define M_FW_FCOE_LINK_CMD_VNPI 0xfffff #define V_FW_FCOE_LINK_CMD_VNPI(x) ((x) << S_FW_FCOE_LINK_CMD_VNPI) #define G_FW_FCOE_LINK_CMD_VNPI(x) \ (((x) >> S_FW_FCOE_LINK_CMD_VNPI) & M_FW_FCOE_LINK_CMD_VNPI) struct fw_fcoe_vnp_cmd { __be32 op_to_fcfi; __be32 alloc_to_len16; __be32 gen_wwn_to_vnpi; __be32 vf_id; __be16 iqid; __u8 vnport_mac[6]; __u8 vnport_wwnn[8]; __u8 vnport_wwpn[8]; __u8 cmn_srv_parms[16]; __u8 clsp_word_0_1[8]; }; #define S_FW_FCOE_VNP_CMD_FCFI 0 #define M_FW_FCOE_VNP_CMD_FCFI 0xfffff #define V_FW_FCOE_VNP_CMD_FCFI(x) ((x) << S_FW_FCOE_VNP_CMD_FCFI) #define G_FW_FCOE_VNP_CMD_FCFI(x) \ (((x) >> S_FW_FCOE_VNP_CMD_FCFI) & M_FW_FCOE_VNP_CMD_FCFI) 
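/*
 * Illustrative sketch, not part of the firmware interface: unpacking the
 * FCF and VN_Port indices carried in a FCoE link command.  be32toh and
 * u_int are assumed from the surrounding FreeBSD kernel environment.
 */
static inline void
fcoe_link_ids(const struct fw_fcoe_link_cmd *c, u_int *fcfi, u_int *vnpi)
{
	*fcfi = G_FW_FCOE_LINK_CMD_FCFI(be32toh(c->sub_opcode_fcfi));
	*vnpi = G_FW_FCOE_LINK_CMD_VNPI(be32toh(c->vnpi_pkd));
}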
#define S_FW_FCOE_VNP_CMD_ALLOC 31 #define M_FW_FCOE_VNP_CMD_ALLOC 0x1 #define V_FW_FCOE_VNP_CMD_ALLOC(x) ((x) << S_FW_FCOE_VNP_CMD_ALLOC) #define G_FW_FCOE_VNP_CMD_ALLOC(x) \ (((x) >> S_FW_FCOE_VNP_CMD_ALLOC) & M_FW_FCOE_VNP_CMD_ALLOC) #define F_FW_FCOE_VNP_CMD_ALLOC V_FW_FCOE_VNP_CMD_ALLOC(1U) #define S_FW_FCOE_VNP_CMD_FREE 30 #define M_FW_FCOE_VNP_CMD_FREE 0x1 #define V_FW_FCOE_VNP_CMD_FREE(x) ((x) << S_FW_FCOE_VNP_CMD_FREE) #define G_FW_FCOE_VNP_CMD_FREE(x) \ (((x) >> S_FW_FCOE_VNP_CMD_FREE) & M_FW_FCOE_VNP_CMD_FREE) #define F_FW_FCOE_VNP_CMD_FREE V_FW_FCOE_VNP_CMD_FREE(1U) #define S_FW_FCOE_VNP_CMD_MODIFY 29 #define M_FW_FCOE_VNP_CMD_MODIFY 0x1 #define V_FW_FCOE_VNP_CMD_MODIFY(x) ((x) << S_FW_FCOE_VNP_CMD_MODIFY) #define G_FW_FCOE_VNP_CMD_MODIFY(x) \ (((x) >> S_FW_FCOE_VNP_CMD_MODIFY) & M_FW_FCOE_VNP_CMD_MODIFY) #define F_FW_FCOE_VNP_CMD_MODIFY V_FW_FCOE_VNP_CMD_MODIFY(1U) #define S_FW_FCOE_VNP_CMD_GEN_WWN 22 #define M_FW_FCOE_VNP_CMD_GEN_WWN 0x1 #define V_FW_FCOE_VNP_CMD_GEN_WWN(x) ((x) << S_FW_FCOE_VNP_CMD_GEN_WWN) #define G_FW_FCOE_VNP_CMD_GEN_WWN(x) \ (((x) >> S_FW_FCOE_VNP_CMD_GEN_WWN) & M_FW_FCOE_VNP_CMD_GEN_WWN) #define F_FW_FCOE_VNP_CMD_GEN_WWN V_FW_FCOE_VNP_CMD_GEN_WWN(1U) #define S_FW_FCOE_VNP_CMD_PERSIST 21 #define M_FW_FCOE_VNP_CMD_PERSIST 0x1 #define V_FW_FCOE_VNP_CMD_PERSIST(x) ((x) << S_FW_FCOE_VNP_CMD_PERSIST) #define G_FW_FCOE_VNP_CMD_PERSIST(x) \ (((x) >> S_FW_FCOE_VNP_CMD_PERSIST) & M_FW_FCOE_VNP_CMD_PERSIST) #define F_FW_FCOE_VNP_CMD_PERSIST V_FW_FCOE_VNP_CMD_PERSIST(1U) #define S_FW_FCOE_VNP_CMD_VFID_EN 20 #define M_FW_FCOE_VNP_CMD_VFID_EN 0x1 #define V_FW_FCOE_VNP_CMD_VFID_EN(x) ((x) << S_FW_FCOE_VNP_CMD_VFID_EN) #define G_FW_FCOE_VNP_CMD_VFID_EN(x) \ (((x) >> S_FW_FCOE_VNP_CMD_VFID_EN) & M_FW_FCOE_VNP_CMD_VFID_EN) #define F_FW_FCOE_VNP_CMD_VFID_EN V_FW_FCOE_VNP_CMD_VFID_EN(1U) #define S_FW_FCOE_VNP_CMD_VNPI 0 #define M_FW_FCOE_VNP_CMD_VNPI 0xfffff #define V_FW_FCOE_VNP_CMD_VNPI(x) ((x) << S_FW_FCOE_VNP_CMD_VNPI) #define G_FW_FCOE_VNP_CMD_VNPI(x) \ (((x) >> S_FW_FCOE_VNP_CMD_VNPI) & M_FW_FCOE_VNP_CMD_VNPI) struct fw_fcoe_sparams_cmd { __be32 op_to_portid; __be32 retval_len16; __u8 r3[7]; __u8 cos; __u8 lport_wwnn[8]; __u8 lport_wwpn[8]; __u8 cmn_srv_parms[16]; __u8 cls_srv_parms[16]; }; #define S_FW_FCOE_SPARAMS_CMD_PORTID 0 #define M_FW_FCOE_SPARAMS_CMD_PORTID 0xf #define V_FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << S_FW_FCOE_SPARAMS_CMD_PORTID) #define G_FW_FCOE_SPARAMS_CMD_PORTID(x) \ (((x) >> S_FW_FCOE_SPARAMS_CMD_PORTID) & M_FW_FCOE_SPARAMS_CMD_PORTID) struct fw_fcoe_stats_cmd { __be32 op_to_flowid; __be32 free_to_len16; union fw_fcoe_stats { struct fw_fcoe_stats_ctl { __u8 nstats_port; __u8 port_valid_ix; __be16 r6; __be32 r7; __be64 stat0; __be64 stat1; __be64 stat2; __be64 stat3; __be64 stat4; __be64 stat5; } ctl; struct fw_fcoe_port_stats { __be64 tx_bcast_bytes; __be64 tx_bcast_frames; __be64 tx_mcast_bytes; __be64 tx_mcast_frames; __be64 tx_ucast_bytes; __be64 tx_ucast_frames; __be64 tx_drop_frames; __be64 tx_offload_bytes; __be64 tx_offload_frames; __be64 rx_bcast_bytes; __be64 rx_bcast_frames; __be64 rx_mcast_bytes; __be64 rx_mcast_frames; __be64 rx_ucast_bytes; __be64 rx_ucast_frames; __be64 rx_err_frames; } port_stats; struct fw_fcoe_fcf_stats { __be32 fip_tx_bytes; __be32 fip_tx_fr; __be64 fcf_ka; __be64 mcast_adv_rcvd; __be16 ucast_adv_rcvd; __be16 sol_sent; __be16 vlan_req; __be16 vlan_rpl; __be16 clr_vlink; __be16 link_down; __be16 link_up; __be16 logo; __be16 flogi_req; __be16 flogi_rpl; __be16 fdisc_req; __be16 fdisc_rpl; __be16 fka_prd_chg; 
__be16 fc_map_chg; __be16 vfid_chg; __u8 no_fka_req; __u8 no_vnp; } fcf_stats; struct fw_fcoe_pcb_stats { __be64 tx_bytes; __be64 tx_frames; __be64 rx_bytes; __be64 rx_frames; __be32 vnp_ka; __be32 unsol_els_rcvd; __be64 unsol_cmd_rcvd; __be16 implicit_logo; __be16 flogi_inv_sparm; __be16 fdisc_inv_sparm; __be16 flogi_rjt; __be16 fdisc_rjt; __be16 no_ssn; __be16 mac_flt_fail; __be16 inv_fr_rcvd; } pcb_stats; struct fw_fcoe_scb_stats { __be64 tx_bytes; __be64 tx_frames; __be64 rx_bytes; __be64 rx_frames; __be32 host_abrt_req; __be32 adap_auto_abrt; __be32 adap_abrt_rsp; __be32 host_ios_req; __be16 ssn_offl_ios; __be16 ssn_not_rdy_ios; __u8 rx_data_ddp_err; __u8 ddp_flt_set_err; __be16 rx_data_fr_err; __u8 bad_st_abrt_req; __u8 no_io_abrt_req; __u8 abort_tmo; __u8 abort_tmo_2; __be32 abort_req; __u8 no_ppod_res_tmo; __u8 bp_tmo; __u8 adap_auto_cls; __u8 no_io_cls_req; __be32 host_cls_req; __be64 unsol_cmd_rcvd; __be32 plogi_req_rcvd; __be32 prli_req_rcvd; __be16 logo_req_rcvd; __be16 prlo_req_rcvd; __be16 plogi_rjt_rcvd; __be16 prli_rjt_rcvd; __be32 adisc_req_rcvd; __be32 rscn_rcvd; __be32 rrq_req_rcvd; __be32 unsol_els_rcvd; __u8 adisc_rjt_rcvd; __u8 scr_rjt; __u8 ct_rjt; __u8 inval_bls_rcvd; __be32 ba_rjt_rcvd; } scb_stats; } u; }; #define S_FW_FCOE_STATS_CMD_FLOWID 0 #define M_FW_FCOE_STATS_CMD_FLOWID 0xfffff #define V_FW_FCOE_STATS_CMD_FLOWID(x) ((x) << S_FW_FCOE_STATS_CMD_FLOWID) #define G_FW_FCOE_STATS_CMD_FLOWID(x) \ (((x) >> S_FW_FCOE_STATS_CMD_FLOWID) & M_FW_FCOE_STATS_CMD_FLOWID) #define S_FW_FCOE_STATS_CMD_FREE 30 #define M_FW_FCOE_STATS_CMD_FREE 0x1 #define V_FW_FCOE_STATS_CMD_FREE(x) ((x) << S_FW_FCOE_STATS_CMD_FREE) #define G_FW_FCOE_STATS_CMD_FREE(x) \ (((x) >> S_FW_FCOE_STATS_CMD_FREE) & M_FW_FCOE_STATS_CMD_FREE) #define F_FW_FCOE_STATS_CMD_FREE V_FW_FCOE_STATS_CMD_FREE(1U) #define S_FW_FCOE_STATS_CMD_NSTATS 4 #define M_FW_FCOE_STATS_CMD_NSTATS 0x7 #define V_FW_FCOE_STATS_CMD_NSTATS(x) ((x) << S_FW_FCOE_STATS_CMD_NSTATS) #define G_FW_FCOE_STATS_CMD_NSTATS(x) \ (((x) >> S_FW_FCOE_STATS_CMD_NSTATS) & M_FW_FCOE_STATS_CMD_NSTATS) #define S_FW_FCOE_STATS_CMD_PORT 0 #define M_FW_FCOE_STATS_CMD_PORT 0x3 #define V_FW_FCOE_STATS_CMD_PORT(x) ((x) << S_FW_FCOE_STATS_CMD_PORT) #define G_FW_FCOE_STATS_CMD_PORT(x) \ (((x) >> S_FW_FCOE_STATS_CMD_PORT) & M_FW_FCOE_STATS_CMD_PORT) #define S_FW_FCOE_STATS_CMD_PORT_VALID 7 #define M_FW_FCOE_STATS_CMD_PORT_VALID 0x1 #define V_FW_FCOE_STATS_CMD_PORT_VALID(x) \ ((x) << S_FW_FCOE_STATS_CMD_PORT_VALID) #define G_FW_FCOE_STATS_CMD_PORT_VALID(x) \ (((x) >> S_FW_FCOE_STATS_CMD_PORT_VALID) & M_FW_FCOE_STATS_CMD_PORT_VALID) #define F_FW_FCOE_STATS_CMD_PORT_VALID V_FW_FCOE_STATS_CMD_PORT_VALID(1U) #define S_FW_FCOE_STATS_CMD_IX 0 #define M_FW_FCOE_STATS_CMD_IX 0x3f #define V_FW_FCOE_STATS_CMD_IX(x) ((x) << S_FW_FCOE_STATS_CMD_IX) #define G_FW_FCOE_STATS_CMD_IX(x) \ (((x) >> S_FW_FCOE_STATS_CMD_IX) & M_FW_FCOE_STATS_CMD_IX) struct fw_fcoe_fcf_cmd { __be32 op_to_fcfi; __be32 retval_len16; __be16 priority_pkd; __u8 mac[6]; __u8 name_id[8]; __u8 fabric[8]; __be16 vf_id; __be16 max_fcoe_size; __u8 vlan_id; __u8 fc_map[3]; __be32 fka_adv; __be32 r6; __u8 r7_hi; __u8 fpma_to_portid; __u8 spma_mac[6]; __be64 r8; }; #define S_FW_FCOE_FCF_CMD_FCFI 0 #define M_FW_FCOE_FCF_CMD_FCFI 0xfffff #define V_FW_FCOE_FCF_CMD_FCFI(x) ((x) << S_FW_FCOE_FCF_CMD_FCFI) #define G_FW_FCOE_FCF_CMD_FCFI(x) \ (((x) >> S_FW_FCOE_FCF_CMD_FCFI) & M_FW_FCOE_FCF_CMD_FCFI) #define S_FW_FCOE_FCF_CMD_PRIORITY 0 #define M_FW_FCOE_FCF_CMD_PRIORITY 0xff #define V_FW_FCOE_FCF_CMD_PRIORITY(x) ((x) 
<< S_FW_FCOE_FCF_CMD_PRIORITY) #define G_FW_FCOE_FCF_CMD_PRIORITY(x) \ (((x) >> S_FW_FCOE_FCF_CMD_PRIORITY) & M_FW_FCOE_FCF_CMD_PRIORITY) #define S_FW_FCOE_FCF_CMD_FPMA 6 #define M_FW_FCOE_FCF_CMD_FPMA 0x1 #define V_FW_FCOE_FCF_CMD_FPMA(x) ((x) << S_FW_FCOE_FCF_CMD_FPMA) #define G_FW_FCOE_FCF_CMD_FPMA(x) \ (((x) >> S_FW_FCOE_FCF_CMD_FPMA) & M_FW_FCOE_FCF_CMD_FPMA) #define F_FW_FCOE_FCF_CMD_FPMA V_FW_FCOE_FCF_CMD_FPMA(1U) #define S_FW_FCOE_FCF_CMD_SPMA 5 #define M_FW_FCOE_FCF_CMD_SPMA 0x1 #define V_FW_FCOE_FCF_CMD_SPMA(x) ((x) << S_FW_FCOE_FCF_CMD_SPMA) #define G_FW_FCOE_FCF_CMD_SPMA(x) \ (((x) >> S_FW_FCOE_FCF_CMD_SPMA) & M_FW_FCOE_FCF_CMD_SPMA) #define F_FW_FCOE_FCF_CMD_SPMA V_FW_FCOE_FCF_CMD_SPMA(1U) #define S_FW_FCOE_FCF_CMD_LOGIN 4 #define M_FW_FCOE_FCF_CMD_LOGIN 0x1 #define V_FW_FCOE_FCF_CMD_LOGIN(x) ((x) << S_FW_FCOE_FCF_CMD_LOGIN) #define G_FW_FCOE_FCF_CMD_LOGIN(x) \ (((x) >> S_FW_FCOE_FCF_CMD_LOGIN) & M_FW_FCOE_FCF_CMD_LOGIN) #define F_FW_FCOE_FCF_CMD_LOGIN V_FW_FCOE_FCF_CMD_LOGIN(1U) #define S_FW_FCOE_FCF_CMD_PORTID 0 #define M_FW_FCOE_FCF_CMD_PORTID 0xf #define V_FW_FCOE_FCF_CMD_PORTID(x) ((x) << S_FW_FCOE_FCF_CMD_PORTID) #define G_FW_FCOE_FCF_CMD_PORTID(x) \ (((x) >> S_FW_FCOE_FCF_CMD_PORTID) & M_FW_FCOE_FCF_CMD_PORTID) /****************************************************************************** * E R R O R a n d D E B U G C O M M A N D s ******************************************************/ enum fw_error_type { FW_ERROR_TYPE_EXCEPTION = 0x0, FW_ERROR_TYPE_HWMODULE = 0x1, FW_ERROR_TYPE_WR = 0x2, FW_ERROR_TYPE_ACL = 0x3, }; enum fw_dcb_ieee_locations { FW_IEEE_LOC_LOCAL, FW_IEEE_LOC_PEER, FW_IEEE_LOC_OPERATIONAL, }; struct fw_dcb_ieee_cmd { __be32 op_to_location; __be32 changed_to_len16; union fw_dcbx_stats { struct fw_dcbx_pfc_stats_ieee { __be32 pfc_mbc_pkd; __be32 pfc_willing_to_pfc_en; } dcbx_pfc_stats; struct fw_dcbx_ets_stats_ieee { __be32 cbs_to_ets_max_tc; __be32 pg_table; __u8 pg_percent[8]; __u8 tsa[8]; } dcbx_ets_stats; struct fw_dcbx_app_stats_ieee { __be32 num_apps_pkd; __be32 r6; __be32 app[4]; } dcbx_app_stats; struct fw_dcbx_control { __be32 multi_peer_invalidated; __u8 version; __u8 r6[3]; } dcbx_control; } u; }; #define S_FW_DCB_IEEE_CMD_PORT 8 #define M_FW_DCB_IEEE_CMD_PORT 0x7 #define V_FW_DCB_IEEE_CMD_PORT(x) ((x) << S_FW_DCB_IEEE_CMD_PORT) #define G_FW_DCB_IEEE_CMD_PORT(x) \ (((x) >> S_FW_DCB_IEEE_CMD_PORT) & M_FW_DCB_IEEE_CMD_PORT) #define S_FW_DCB_IEEE_CMD_FEATURE 2 #define M_FW_DCB_IEEE_CMD_FEATURE 0x7 #define V_FW_DCB_IEEE_CMD_FEATURE(x) ((x) << S_FW_DCB_IEEE_CMD_FEATURE) #define G_FW_DCB_IEEE_CMD_FEATURE(x) \ (((x) >> S_FW_DCB_IEEE_CMD_FEATURE) & M_FW_DCB_IEEE_CMD_FEATURE) #define S_FW_DCB_IEEE_CMD_LOCATION 0 #define M_FW_DCB_IEEE_CMD_LOCATION 0x3 #define V_FW_DCB_IEEE_CMD_LOCATION(x) ((x) << S_FW_DCB_IEEE_CMD_LOCATION) #define G_FW_DCB_IEEE_CMD_LOCATION(x) \ (((x) >> S_FW_DCB_IEEE_CMD_LOCATION) & M_FW_DCB_IEEE_CMD_LOCATION) #define S_FW_DCB_IEEE_CMD_CHANGED 20 #define M_FW_DCB_IEEE_CMD_CHANGED 0x1 #define V_FW_DCB_IEEE_CMD_CHANGED(x) ((x) << S_FW_DCB_IEEE_CMD_CHANGED) #define G_FW_DCB_IEEE_CMD_CHANGED(x) \ (((x) >> S_FW_DCB_IEEE_CMD_CHANGED) & M_FW_DCB_IEEE_CMD_CHANGED) #define F_FW_DCB_IEEE_CMD_CHANGED V_FW_DCB_IEEE_CMD_CHANGED(1U) #define S_FW_DCB_IEEE_CMD_RECEIVED 19 #define M_FW_DCB_IEEE_CMD_RECEIVED 0x1 #define V_FW_DCB_IEEE_CMD_RECEIVED(x) ((x) << S_FW_DCB_IEEE_CMD_RECEIVED) #define G_FW_DCB_IEEE_CMD_RECEIVED(x) \ (((x) >> S_FW_DCB_IEEE_CMD_RECEIVED) & M_FW_DCB_IEEE_CMD_RECEIVED) #define F_FW_DCB_IEEE_CMD_RECEIVED 
V_FW_DCB_IEEE_CMD_RECEIVED(1U) #define S_FW_DCB_IEEE_CMD_APPLY 18 #define M_FW_DCB_IEEE_CMD_APPLY 0x1 #define V_FW_DCB_IEEE_CMD_APPLY(x) ((x) << S_FW_DCB_IEEE_CMD_APPLY) #define G_FW_DCB_IEEE_CMD_APPLY(x) \ (((x) >> S_FW_DCB_IEEE_CMD_APPLY) & M_FW_DCB_IEEE_CMD_APPLY) #define F_FW_DCB_IEEE_CMD_APPLY V_FW_DCB_IEEE_CMD_APPLY(1U) #define S_FW_DCB_IEEE_CMD_DISABLED 17 #define M_FW_DCB_IEEE_CMD_DISABLED 0x1 #define V_FW_DCB_IEEE_CMD_DISABLED(x) ((x) << S_FW_DCB_IEEE_CMD_DISABLED) #define G_FW_DCB_IEEE_CMD_DISABLED(x) \ (((x) >> S_FW_DCB_IEEE_CMD_DISABLED) & M_FW_DCB_IEEE_CMD_DISABLED) #define F_FW_DCB_IEEE_CMD_DISABLED V_FW_DCB_IEEE_CMD_DISABLED(1U) #define S_FW_DCB_IEEE_CMD_MORE 16 #define M_FW_DCB_IEEE_CMD_MORE 0x1 #define V_FW_DCB_IEEE_CMD_MORE(x) ((x) << S_FW_DCB_IEEE_CMD_MORE) #define G_FW_DCB_IEEE_CMD_MORE(x) \ (((x) >> S_FW_DCB_IEEE_CMD_MORE) & M_FW_DCB_IEEE_CMD_MORE) #define F_FW_DCB_IEEE_CMD_MORE V_FW_DCB_IEEE_CMD_MORE(1U) #define S_FW_DCB_IEEE_CMD_PFC_MBC 0 #define M_FW_DCB_IEEE_CMD_PFC_MBC 0x1 #define V_FW_DCB_IEEE_CMD_PFC_MBC(x) ((x) << S_FW_DCB_IEEE_CMD_PFC_MBC) #define G_FW_DCB_IEEE_CMD_PFC_MBC(x) \ (((x) >> S_FW_DCB_IEEE_CMD_PFC_MBC) & M_FW_DCB_IEEE_CMD_PFC_MBC) #define F_FW_DCB_IEEE_CMD_PFC_MBC V_FW_DCB_IEEE_CMD_PFC_MBC(1U) #define S_FW_DCB_IEEE_CMD_PFC_WILLING 16 #define M_FW_DCB_IEEE_CMD_PFC_WILLING 0x1 #define V_FW_DCB_IEEE_CMD_PFC_WILLING(x) \ ((x) << S_FW_DCB_IEEE_CMD_PFC_WILLING) #define G_FW_DCB_IEEE_CMD_PFC_WILLING(x) \ (((x) >> S_FW_DCB_IEEE_CMD_PFC_WILLING) & M_FW_DCB_IEEE_CMD_PFC_WILLING) #define F_FW_DCB_IEEE_CMD_PFC_WILLING V_FW_DCB_IEEE_CMD_PFC_WILLING(1U) #define S_FW_DCB_IEEE_CMD_PFC_MAX_TC 8 #define M_FW_DCB_IEEE_CMD_PFC_MAX_TC 0xff #define V_FW_DCB_IEEE_CMD_PFC_MAX_TC(x) ((x) << S_FW_DCB_IEEE_CMD_PFC_MAX_TC) #define G_FW_DCB_IEEE_CMD_PFC_MAX_TC(x) \ (((x) >> S_FW_DCB_IEEE_CMD_PFC_MAX_TC) & M_FW_DCB_IEEE_CMD_PFC_MAX_TC) #define S_FW_DCB_IEEE_CMD_PFC_EN 0 #define M_FW_DCB_IEEE_CMD_PFC_EN 0xff #define V_FW_DCB_IEEE_CMD_PFC_EN(x) ((x) << S_FW_DCB_IEEE_CMD_PFC_EN) #define G_FW_DCB_IEEE_CMD_PFC_EN(x) \ (((x) >> S_FW_DCB_IEEE_CMD_PFC_EN) & M_FW_DCB_IEEE_CMD_PFC_EN) #define S_FW_DCB_IEEE_CMD_CBS 16 #define M_FW_DCB_IEEE_CMD_CBS 0x1 #define V_FW_DCB_IEEE_CMD_CBS(x) ((x) << S_FW_DCB_IEEE_CMD_CBS) #define G_FW_DCB_IEEE_CMD_CBS(x) \ (((x) >> S_FW_DCB_IEEE_CMD_CBS) & M_FW_DCB_IEEE_CMD_CBS) #define F_FW_DCB_IEEE_CMD_CBS V_FW_DCB_IEEE_CMD_CBS(1U) #define S_FW_DCB_IEEE_CMD_ETS_WILLING 8 #define M_FW_DCB_IEEE_CMD_ETS_WILLING 0x1 #define V_FW_DCB_IEEE_CMD_ETS_WILLING(x) \ ((x) << S_FW_DCB_IEEE_CMD_ETS_WILLING) #define G_FW_DCB_IEEE_CMD_ETS_WILLING(x) \ (((x) >> S_FW_DCB_IEEE_CMD_ETS_WILLING) & M_FW_DCB_IEEE_CMD_ETS_WILLING) #define F_FW_DCB_IEEE_CMD_ETS_WILLING V_FW_DCB_IEEE_CMD_ETS_WILLING(1U) #define S_FW_DCB_IEEE_CMD_ETS_MAX_TC 0 #define M_FW_DCB_IEEE_CMD_ETS_MAX_TC 0xff #define V_FW_DCB_IEEE_CMD_ETS_MAX_TC(x) ((x) << S_FW_DCB_IEEE_CMD_ETS_MAX_TC) #define G_FW_DCB_IEEE_CMD_ETS_MAX_TC(x) \ (((x) >> S_FW_DCB_IEEE_CMD_ETS_MAX_TC) & M_FW_DCB_IEEE_CMD_ETS_MAX_TC) #define S_FW_DCB_IEEE_CMD_NUM_APPS 0 #define M_FW_DCB_IEEE_CMD_NUM_APPS 0x7 #define V_FW_DCB_IEEE_CMD_NUM_APPS(x) ((x) << S_FW_DCB_IEEE_CMD_NUM_APPS) #define G_FW_DCB_IEEE_CMD_NUM_APPS(x) \ (((x) >> S_FW_DCB_IEEE_CMD_NUM_APPS) & M_FW_DCB_IEEE_CMD_NUM_APPS) #define S_FW_DCB_IEEE_CMD_MULTI_PEER 31 #define M_FW_DCB_IEEE_CMD_MULTI_PEER 0x1 #define V_FW_DCB_IEEE_CMD_MULTI_PEER(x) ((x) << S_FW_DCB_IEEE_CMD_MULTI_PEER) #define G_FW_DCB_IEEE_CMD_MULTI_PEER(x) \ (((x) >> S_FW_DCB_IEEE_CMD_MULTI_PEER) & M_FW_DCB_IEEE_CMD_MULTI_PEER) 
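/*
 * Editorial illustration (a hypothetical decode, not driver code): a DCB IEEE
 * command response carrying dcbx_pfc_stats packs the "willing" flag and the
 * per-priority PFC enable bitmap into one big-endian word,
 * pfc_willing_to_pfc_en.  A sketch of how a caller might unpack it with the
 * F_/G_ accessors defined above (assumes be32toh() from <sys/endian.h>):
 */
static inline void
example_decode_pfc(const struct fw_dcb_ieee_cmd *cmd, int *willing,
    uint8_t *pfc_en)
{
	uint32_t v = be32toh(cmd->u.dcbx_pfc_stats.pfc_willing_to_pfc_en);

	*willing = (v & F_FW_DCB_IEEE_CMD_PFC_WILLING) != 0;
	*pfc_en = G_FW_DCB_IEEE_CMD_PFC_EN(v);
}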
#define F_FW_DCB_IEEE_CMD_MULTI_PEER V_FW_DCB_IEEE_CMD_MULTI_PEER(1U) #define S_FW_DCB_IEEE_CMD_INVALIDATED 30 #define M_FW_DCB_IEEE_CMD_INVALIDATED 0x1 #define V_FW_DCB_IEEE_CMD_INVALIDATED(x) \ ((x) << S_FW_DCB_IEEE_CMD_INVALIDATED) #define G_FW_DCB_IEEE_CMD_INVALIDATED(x) \ (((x) >> S_FW_DCB_IEEE_CMD_INVALIDATED) & M_FW_DCB_IEEE_CMD_INVALIDATED) #define F_FW_DCB_IEEE_CMD_INVALIDATED V_FW_DCB_IEEE_CMD_INVALIDATED(1U) /* Hand-written */ #define S_FW_DCB_IEEE_CMD_APP_PROTOCOL 16 #define M_FW_DCB_IEEE_CMD_APP_PROTOCOL 0xffff #define V_FW_DCB_IEEE_CMD_APP_PROTOCOL(x) ((x) << S_FW_DCB_IEEE_CMD_APP_PROTOCOL) #define G_FW_DCB_IEEE_CMD_APP_PROTOCOL(x) \ (((x) >> S_FW_DCB_IEEE_CMD_APP_PROTOCOL) & M_FW_DCB_IEEE_CMD_APP_PROTOCOL) #define S_FW_DCB_IEEE_CMD_APP_SELECT 3 #define M_FW_DCB_IEEE_CMD_APP_SELECT 0x7 #define V_FW_DCB_IEEE_CMD_APP_SELECT(x) ((x) << S_FW_DCB_IEEE_CMD_APP_SELECT) #define G_FW_DCB_IEEE_CMD_APP_SELECT(x) \ (((x) >> S_FW_DCB_IEEE_CMD_APP_SELECT) & M_FW_DCB_IEEE_CMD_APP_SELECT) #define S_FW_DCB_IEEE_CMD_APP_PRIORITY 0 #define M_FW_DCB_IEEE_CMD_APP_PRIORITY 0x7 #define V_FW_DCB_IEEE_CMD_APP_PRIORITY(x) ((x) << S_FW_DCB_IEEE_CMD_APP_PRIORITY) #define G_FW_DCB_IEEE_CMD_APP_PRIORITY(x) \ (((x) >> S_FW_DCB_IEEE_CMD_APP_PRIORITY) & M_FW_DCB_IEEE_CMD_APP_PRIORITY) struct fw_error_cmd { __be32 op_to_type; __be32 len16_pkd; union fw_error { struct fw_error_exception { __be32 info[6]; } exception; struct fw_error_hwmodule { __be32 regaddr; __be32 regval; } hwmodule; struct fw_error_wr { __be16 cidx; __be16 pfn_vfn; __be32 eqid; __u8 wrhdr[16]; } wr; struct fw_error_acl { __be16 cidx; __be16 pfn_vfn; __be32 eqid; __be16 mv_pkd; __u8 val[6]; __be64 r4; } acl; } u; }; #define S_FW_ERROR_CMD_FATAL 4 #define M_FW_ERROR_CMD_FATAL 0x1 #define V_FW_ERROR_CMD_FATAL(x) ((x) << S_FW_ERROR_CMD_FATAL) #define G_FW_ERROR_CMD_FATAL(x) \ (((x) >> S_FW_ERROR_CMD_FATAL) & M_FW_ERROR_CMD_FATAL) #define F_FW_ERROR_CMD_FATAL V_FW_ERROR_CMD_FATAL(1U) #define S_FW_ERROR_CMD_TYPE 0 #define M_FW_ERROR_CMD_TYPE 0xf #define V_FW_ERROR_CMD_TYPE(x) ((x) << S_FW_ERROR_CMD_TYPE) #define G_FW_ERROR_CMD_TYPE(x) \ (((x) >> S_FW_ERROR_CMD_TYPE) & M_FW_ERROR_CMD_TYPE) #define S_FW_ERROR_CMD_PFN 8 #define M_FW_ERROR_CMD_PFN 0x7 #define V_FW_ERROR_CMD_PFN(x) ((x) << S_FW_ERROR_CMD_PFN) #define G_FW_ERROR_CMD_PFN(x) \ (((x) >> S_FW_ERROR_CMD_PFN) & M_FW_ERROR_CMD_PFN) #define S_FW_ERROR_CMD_VFN 0 #define M_FW_ERROR_CMD_VFN 0xff #define V_FW_ERROR_CMD_VFN(x) ((x) << S_FW_ERROR_CMD_VFN) #define G_FW_ERROR_CMD_VFN(x) \ (((x) >> S_FW_ERROR_CMD_VFN) & M_FW_ERROR_CMD_VFN) #define S_FW_ERROR_CMD_MV 15 #define M_FW_ERROR_CMD_MV 0x1 #define V_FW_ERROR_CMD_MV(x) ((x) << S_FW_ERROR_CMD_MV) #define G_FW_ERROR_CMD_MV(x) \ (((x) >> S_FW_ERROR_CMD_MV) & M_FW_ERROR_CMD_MV) #define F_FW_ERROR_CMD_MV V_FW_ERROR_CMD_MV(1U) struct fw_debug_cmd { __be32 op_type; __be32 len16_pkd; union fw_debug { struct fw_debug_assert { __be32 fcid; __be32 line; __be32 x; __be32 y; __u8 filename_0_7[8]; __u8 filename_8_15[8]; __be64 r3; } assert; struct fw_debug_prt { __be16 dprtstridx; __be16 r3[3]; __be32 dprtstrparam0; __be32 dprtstrparam1; __be32 dprtstrparam2;
__be32 dprtstrparam3; } prt; } u; }; #define S_FW_DEBUG_CMD_TYPE 0 #define M_FW_DEBUG_CMD_TYPE 0xff #define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE) #define G_FW_DEBUG_CMD_TYPE(x) \ (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE) enum fw_diag_cmd_type { FW_DIAG_CMD_TYPE_OFLDIAG = 0, }; enum fw_diag_cmd_ofldiag_op { FW_DIAG_CMD_OFLDIAG_TEST_NONE = 0, FW_DIAG_CMD_OFLDIAG_TEST_START, FW_DIAG_CMD_OFLDIAG_TEST_STOP, FW_DIAG_CMD_OFLDIAG_TEST_STATUS, }; enum fw_diag_cmd_ofldiag_status { FW_DIAG_CMD_OFLDIAG_STATUS_IDLE = 0, FW_DIAG_CMD_OFLDIAG_STATUS_RUNNING, FW_DIAG_CMD_OFLDIAG_STATUS_FAILED, FW_DIAG_CMD_OFLDIAG_STATUS_PASSED, }; struct fw_diag_cmd { __be32 op_type; __be32 len16_pkd; union fw_diag_test { struct fw_diag_test_ofldiag { __u8 test_op; __u8 r3; __be16 test_status; __be32 duration; } ofldiag; } u; }; #define S_FW_DIAG_CMD_TYPE 0 #define M_FW_DIAG_CMD_TYPE 0xff #define V_FW_DIAG_CMD_TYPE(x) ((x) << S_FW_DIAG_CMD_TYPE) #define G_FW_DIAG_CMD_TYPE(x) \ (((x) >> S_FW_DIAG_CMD_TYPE) & M_FW_DIAG_CMD_TYPE) struct fw_hma_cmd { __be32 op_pkd; __be32 retval_len16; __be32 mode_to_pcie_params; __be32 naddr_size; __be32 addr_size_pkd; __be32 r6; __be64 phy_address[5]; }; #define S_FW_HMA_CMD_MODE 31 #define M_FW_HMA_CMD_MODE 0x1 #define V_FW_HMA_CMD_MODE(x) ((x) << S_FW_HMA_CMD_MODE) #define G_FW_HMA_CMD_MODE(x) \ (((x) >> S_FW_HMA_CMD_MODE) & M_FW_HMA_CMD_MODE) #define F_FW_HMA_CMD_MODE V_FW_HMA_CMD_MODE(1U) #define S_FW_HMA_CMD_SOC 30 #define M_FW_HMA_CMD_SOC 0x1 #define V_FW_HMA_CMD_SOC(x) ((x) << S_FW_HMA_CMD_SOC) #define G_FW_HMA_CMD_SOC(x) (((x) >> S_FW_HMA_CMD_SOC) & M_FW_HMA_CMD_SOC) #define F_FW_HMA_CMD_SOC V_FW_HMA_CMD_SOC(1U) #define S_FW_HMA_CMD_EOC 29 #define M_FW_HMA_CMD_EOC 0x1 #define V_FW_HMA_CMD_EOC(x) ((x) << S_FW_HMA_CMD_EOC) #define G_FW_HMA_CMD_EOC(x) (((x) >> S_FW_HMA_CMD_EOC) & M_FW_HMA_CMD_EOC) #define F_FW_HMA_CMD_EOC V_FW_HMA_CMD_EOC(1U) #define S_FW_HMA_CMD_PCIE_PARAMS 0 #define M_FW_HMA_CMD_PCIE_PARAMS 0x7ffffff #define V_FW_HMA_CMD_PCIE_PARAMS(x) ((x) << S_FW_HMA_CMD_PCIE_PARAMS) #define G_FW_HMA_CMD_PCIE_PARAMS(x) \ (((x) >> S_FW_HMA_CMD_PCIE_PARAMS) & M_FW_HMA_CMD_PCIE_PARAMS) #define S_FW_HMA_CMD_NADDR 12 #define M_FW_HMA_CMD_NADDR 0x3f #define V_FW_HMA_CMD_NADDR(x) ((x) << S_FW_HMA_CMD_NADDR) #define G_FW_HMA_CMD_NADDR(x) \ (((x) >> S_FW_HMA_CMD_NADDR) & M_FW_HMA_CMD_NADDR) #define S_FW_HMA_CMD_SIZE 0 #define M_FW_HMA_CMD_SIZE 0xfff #define V_FW_HMA_CMD_SIZE(x) ((x) << S_FW_HMA_CMD_SIZE) #define G_FW_HMA_CMD_SIZE(x) \ (((x) >> S_FW_HMA_CMD_SIZE) & M_FW_HMA_CMD_SIZE) #define S_FW_HMA_CMD_ADDR_SIZE 11 #define M_FW_HMA_CMD_ADDR_SIZE 0x1fffff #define V_FW_HMA_CMD_ADDR_SIZE(x) ((x) << S_FW_HMA_CMD_ADDR_SIZE) #define G_FW_HMA_CMD_ADDR_SIZE(x) \ (((x) >> S_FW_HMA_CMD_ADDR_SIZE) & M_FW_HMA_CMD_ADDR_SIZE) /****************************************************************************** * P C I E F W R E G I S T E R **************************************/ enum pcie_fw_eval { PCIE_FW_EVAL_CRASH = 0, PCIE_FW_EVAL_PREP = 1, PCIE_FW_EVAL_CONF = 2, PCIE_FW_EVAL_INIT = 3, PCIE_FW_EVAL_UNEXPECTEDEVENT = 4, PCIE_FW_EVAL_OVERHEAT = 5, PCIE_FW_EVAL_DEVICESHUTDOWN = 6, }; /** * Register definitions for the PCIE_FW register which the firmware uses * to retain status across RESETs. This register should be considered * as a READ-ONLY register for Host Software and only to be used to * track firmware initialization/error state, etc. 
*/ #define S_PCIE_FW_ERR 31 #define M_PCIE_FW_ERR 0x1 #define V_PCIE_FW_ERR(x) ((x) << S_PCIE_FW_ERR) #define G_PCIE_FW_ERR(x) (((x) >> S_PCIE_FW_ERR) & M_PCIE_FW_ERR) #define F_PCIE_FW_ERR V_PCIE_FW_ERR(1U) #define S_PCIE_FW_INIT 30 #define M_PCIE_FW_INIT 0x1 #define V_PCIE_FW_INIT(x) ((x) << S_PCIE_FW_INIT) #define G_PCIE_FW_INIT(x) (((x) >> S_PCIE_FW_INIT) & M_PCIE_FW_INIT) #define F_PCIE_FW_INIT V_PCIE_FW_INIT(1U) #define S_PCIE_FW_HALT 29 #define M_PCIE_FW_HALT 0x1 #define V_PCIE_FW_HALT(x) ((x) << S_PCIE_FW_HALT) #define G_PCIE_FW_HALT(x) (((x) >> S_PCIE_FW_HALT) & M_PCIE_FW_HALT) #define F_PCIE_FW_HALT V_PCIE_FW_HALT(1U) #define S_PCIE_FW_EVAL 24 #define M_PCIE_FW_EVAL 0x7 #define V_PCIE_FW_EVAL(x) ((x) << S_PCIE_FW_EVAL) #define G_PCIE_FW_EVAL(x) (((x) >> S_PCIE_FW_EVAL) & M_PCIE_FW_EVAL) #define S_PCIE_FW_STAGE 21 #define M_PCIE_FW_STAGE 0x7 #define V_PCIE_FW_STAGE(x) ((x) << S_PCIE_FW_STAGE) #define G_PCIE_FW_STAGE(x) (((x) >> S_PCIE_FW_STAGE) & M_PCIE_FW_STAGE) #define S_PCIE_FW_ASYNCNOT_VLD 20 #define M_PCIE_FW_ASYNCNOT_VLD 0x1 #define V_PCIE_FW_ASYNCNOT_VLD(x) \ ((x) << S_PCIE_FW_ASYNCNOT_VLD) #define G_PCIE_FW_ASYNCNOT_VLD(x) \ (((x) >> S_PCIE_FW_ASYNCNOT_VLD) & M_PCIE_FW_ASYNCNOT_VLD) #define F_PCIE_FW_ASYNCNOT_VLD V_PCIE_FW_ASYNCNOT_VLD(1U) #define S_PCIE_FW_ASYNCNOTINT 19 #define M_PCIE_FW_ASYNCNOTINT 0x1 #define V_PCIE_FW_ASYNCNOTINT(x) \ ((x) << S_PCIE_FW_ASYNCNOTINT) #define G_PCIE_FW_ASYNCNOTINT(x) \ (((x) >> S_PCIE_FW_ASYNCNOTINT) & M_PCIE_FW_ASYNCNOTINT) #define F_PCIE_FW_ASYNCNOTINT V_PCIE_FW_ASYNCNOTINT(1U) #define S_PCIE_FW_ASYNCNOT 16 #define M_PCIE_FW_ASYNCNOT 0x7 #define V_PCIE_FW_ASYNCNOT(x) ((x) << S_PCIE_FW_ASYNCNOT) #define G_PCIE_FW_ASYNCNOT(x) \ (((x) >> S_PCIE_FW_ASYNCNOT) & M_PCIE_FW_ASYNCNOT) #define S_PCIE_FW_MASTER_VLD 15 #define M_PCIE_FW_MASTER_VLD 0x1 #define V_PCIE_FW_MASTER_VLD(x) ((x) << S_PCIE_FW_MASTER_VLD) #define G_PCIE_FW_MASTER_VLD(x) \ (((x) >> S_PCIE_FW_MASTER_VLD) & M_PCIE_FW_MASTER_VLD) #define F_PCIE_FW_MASTER_VLD V_PCIE_FW_MASTER_VLD(1U) #define S_PCIE_FW_MASTER 12 #define M_PCIE_FW_MASTER 0x7 #define V_PCIE_FW_MASTER(x) ((x) << S_PCIE_FW_MASTER) #define G_PCIE_FW_MASTER(x) (((x) >> S_PCIE_FW_MASTER) & M_PCIE_FW_MASTER) #define S_PCIE_FW_RESET_VLD 11 #define M_PCIE_FW_RESET_VLD 0x1 #define V_PCIE_FW_RESET_VLD(x) ((x) << S_PCIE_FW_RESET_VLD) #define G_PCIE_FW_RESET_VLD(x) \ (((x) >> S_PCIE_FW_RESET_VLD) & M_PCIE_FW_RESET_VLD) #define F_PCIE_FW_RESET_VLD V_PCIE_FW_RESET_VLD(1U) #define S_PCIE_FW_RESET 8 #define M_PCIE_FW_RESET 0x7 #define V_PCIE_FW_RESET(x) ((x) << S_PCIE_FW_RESET) #define G_PCIE_FW_RESET(x) \ (((x) >> S_PCIE_FW_RESET) & M_PCIE_FW_RESET) #define S_PCIE_FW_REGISTERED 0 #define M_PCIE_FW_REGISTERED 0xff #define V_PCIE_FW_REGISTERED(x) ((x) << S_PCIE_FW_REGISTERED) #define G_PCIE_FW_REGISTERED(x) \ (((x) >> S_PCIE_FW_REGISTERED) & M_PCIE_FW_REGISTERED) /****************************************************************************** * P C I E F W P F 0 R E G I S T E R **********************************************/ /* * this register is available as 32 bits of persistent storage (across * PL_RST based chip-reset) for boot drivers (i.e. firmware and driver * will not write it) */ /****************************************************************************** * P C I E F W P F 7 R E G I S T E R **********************************************/ /* * PF7 stores the Firmware Device Log parameters which allows Host Drivers to * access the "devlog" without needing to contact firmware.
The encoding is * mostly the same as that returned by the DEVLOG command except for the size * which is encoded as the number of entries in multiples-1 of 128 here rather * than the memory size as is done in the DEVLOG command. Thus, 0 means 128 * and 15 means 2048. This of course in turn constrains the allowed values * for the devlog size ... */ #define PCIE_FW_PF_DEVLOG 7 #define S_PCIE_FW_PF_DEVLOG_NENTRIES128 28 #define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0xf #define V_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \ ((x) << S_PCIE_FW_PF_DEVLOG_NENTRIES128) #define G_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \ (((x) >> S_PCIE_FW_PF_DEVLOG_NENTRIES128) & \ M_PCIE_FW_PF_DEVLOG_NENTRIES128) #define S_PCIE_FW_PF_DEVLOG_ADDR16 4 #define M_PCIE_FW_PF_DEVLOG_ADDR16 0xffffff #define V_PCIE_FW_PF_DEVLOG_ADDR16(x) ((x) << S_PCIE_FW_PF_DEVLOG_ADDR16) #define G_PCIE_FW_PF_DEVLOG_ADDR16(x) \ (((x) >> S_PCIE_FW_PF_DEVLOG_ADDR16) & M_PCIE_FW_PF_DEVLOG_ADDR16) #define S_PCIE_FW_PF_DEVLOG_MEMTYPE 0 #define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0xf #define V_PCIE_FW_PF_DEVLOG_MEMTYPE(x) ((x) << S_PCIE_FW_PF_DEVLOG_MEMTYPE) #define G_PCIE_FW_PF_DEVLOG_MEMTYPE(x) \ (((x) >> S_PCIE_FW_PF_DEVLOG_MEMTYPE) & M_PCIE_FW_PF_DEVLOG_MEMTYPE) /****************************************************************************** * B I N A R Y H E A D E R F O R M A T **********************************************/ /* * firmware binary header format */ struct fw_hdr { __u8 ver; __u8 chip; /* terminator chip family */ __be16 len512; /* bin length in units of 512-bytes */ __be32 fw_ver; /* firmware version */ __be32 tp_microcode_ver; /* tcp processor microcode version */ __u8 intfver_nic; __u8 intfver_vnic; __u8 intfver_ofld; __u8 intfver_ri; __u8 intfver_iscsipdu; __u8 intfver_iscsi; __u8 intfver_fcoepdu; __u8 intfver_fcoe; __u32 reserved2; __u32 reserved3; __be32 magic; /* runtime or bootstrap fw */ __be32 flags; __be32 reserved6[23]; }; enum fw_hdr_chip { FW_HDR_CHIP_T4, FW_HDR_CHIP_T5, FW_HDR_CHIP_T6 }; #define S_FW_HDR_FW_VER_MAJOR 24 #define M_FW_HDR_FW_VER_MAJOR 0xff #define V_FW_HDR_FW_VER_MAJOR(x) \ ((x) << S_FW_HDR_FW_VER_MAJOR) #define G_FW_HDR_FW_VER_MAJOR(x) \ (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR) #define S_FW_HDR_FW_VER_MINOR 16 #define M_FW_HDR_FW_VER_MINOR 0xff #define V_FW_HDR_FW_VER_MINOR(x) \ ((x) << S_FW_HDR_FW_VER_MINOR) #define G_FW_HDR_FW_VER_MINOR(x) \ (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR) #define S_FW_HDR_FW_VER_MICRO 8 #define M_FW_HDR_FW_VER_MICRO 0xff #define V_FW_HDR_FW_VER_MICRO(x) \ ((x) << S_FW_HDR_FW_VER_MICRO) #define G_FW_HDR_FW_VER_MICRO(x) \ (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO) #define S_FW_HDR_FW_VER_BUILD 0 #define M_FW_HDR_FW_VER_BUILD 0xff #define V_FW_HDR_FW_VER_BUILD(x) \ ((x) << S_FW_HDR_FW_VER_BUILD) #define G_FW_HDR_FW_VER_BUILD(x) \ (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD) enum { T4FW_VERSION_MAJOR = 0x01, T4FW_VERSION_MINOR = 0x17, T4FW_VERSION_MICRO = 0x00, T4FW_VERSION_BUILD = 0x00, T5FW_VERSION_MAJOR = 0x01, T5FW_VERSION_MINOR = 0x17, T5FW_VERSION_MICRO = 0x00, T5FW_VERSION_BUILD = 0x00, T6FW_VERSION_MAJOR = 0x01, T6FW_VERSION_MINOR = 0x17, T6FW_VERSION_MICRO = 0x00, T6FW_VERSION_BUILD = 0x00, }; enum { /* T4 */ T4FW_HDR_INTFVER_NIC = 0x00, T4FW_HDR_INTFVER_VNIC = 0x00, T4FW_HDR_INTFVER_OFLD = 0x00, T4FW_HDR_INTFVER_RI = 0x00, T4FW_HDR_INTFVER_ISCSIPDU= 0x00, T4FW_HDR_INTFVER_ISCSI = 0x00, T4FW_HDR_INTFVER_FCOEPDU = 0x00, T4FW_HDR_INTFVER_FCOE = 0x00, /* T5 */ T5FW_HDR_INTFVER_NIC = 0x00, T5FW_HDR_INTFVER_VNIC = 0x00, T5FW_HDR_INTFVER_OFLD 
= 0x00, T5FW_HDR_INTFVER_RI = 0x00, T5FW_HDR_INTFVER_ISCSIPDU= 0x00, T5FW_HDR_INTFVER_ISCSI = 0x00, T5FW_HDR_INTFVER_FCOEPDU= 0x00, T5FW_HDR_INTFVER_FCOE = 0x00, /* T6 */ T6FW_HDR_INTFVER_NIC = 0x00, T6FW_HDR_INTFVER_VNIC = 0x00, T6FW_HDR_INTFVER_OFLD = 0x00, T6FW_HDR_INTFVER_RI = 0x00, T6FW_HDR_INTFVER_ISCSIPDU= 0x00, T6FW_HDR_INTFVER_ISCSI = 0x00, T6FW_HDR_INTFVER_FCOEPDU= 0x00, T6FW_HDR_INTFVER_FCOE = 0x00, }; enum { FW_HDR_MAGIC_RUNTIME = 0x00000000, FW_HDR_MAGIC_BOOTSTRAP = 0x626f6f74, }; enum fw_hdr_flags { FW_HDR_FLAGS_RESET_HALT = 0x00000001, }; /* * External PHY firmware binary header format */ struct fw_ephy_hdr { __u8 ver; __u8 reserved; __be16 len512; /* bin length in units of 512-bytes */ __be32 magic; __be16 vendor_id; __be16 device_id; __be32 version; __be32 reserved1[4]; }; enum { FW_EPHY_HDR_MAGIC = 0x65706879, }; struct fw_ifconf_dhcp_info { __be32 addr; __be32 mask; __be16 vlanid; __be16 mtu; __be32 gw; __u8 op; __u8 len; __u8 data[270]; }; #endif /* _T4FW_INTERFACE_H_ */ Index: stable/11/sys/dev/cxgbe/t4_main.c =================================================================== --- stable/11/sys/dev/cxgbe/t4_main.c (revision 346966) +++ stable/11/sys/dev/cxgbe/t4_main.c (revision 346967) @@ -1,10789 +1,10791 @@ /*- * Copyright (c) 2011 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include #include #if defined(__i386__) || defined(__amd64__) #include #include #include #include #endif #include #ifdef DDB #include #include #endif #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "cudbg/cudbg.h" #include "t4_clip.h" #include "t4_ioctl.h" #include "t4_l2t.h" #include "t4_mp_ring.h" #include "t4_if.h" #include "t4_smt.h" /* T4 bus driver interface */ static int t4_probe(device_t); static int t4_attach(device_t); static int t4_detach(device_t); static int t4_child_location_str(device_t, device_t, char *, size_t); static int t4_ready(device_t); static int t4_read_port_device(device_t, int, device_t *); static device_method_t t4_methods[] = { DEVMETHOD(device_probe, t4_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(bus_child_location_str, t4_child_location_str), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t4_driver = { "t4nex", t4_methods, sizeof(struct adapter) }; /* T4 port (cxgbe) interface */ static int cxgbe_probe(device_t); static int cxgbe_attach(device_t); static int cxgbe_detach(device_t); device_method_t cxgbe_methods[] = { DEVMETHOD(device_probe, cxgbe_probe), DEVMETHOD(device_attach, cxgbe_attach), DEVMETHOD(device_detach, cxgbe_detach), { 0, 0 } }; static driver_t cxgbe_driver = { "cxgbe", cxgbe_methods, sizeof(struct port_info) }; /* T4 VI (vcxgbe) interface */ static int vcxgbe_probe(device_t); static int vcxgbe_attach(device_t); static int vcxgbe_detach(device_t); static device_method_t vcxgbe_methods[] = { DEVMETHOD(device_probe, vcxgbe_probe), DEVMETHOD(device_attach, vcxgbe_attach), DEVMETHOD(device_detach, vcxgbe_detach), { 0, 0 } }; static driver_t vcxgbe_driver = { "vcxgbe", vcxgbe_methods, sizeof(struct vi_info) }; static d_ioctl_t t4_ioctl; static struct cdevsw t4_cdevsw = { .d_version = D_VERSION, .d_ioctl = t4_ioctl, .d_name = "t4nex", }; /* T5 bus driver interface */ static int t5_probe(device_t); static device_method_t t5_methods[] = { DEVMETHOD(device_probe, t5_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(bus_child_location_str, t4_child_location_str), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t5_driver = { "t5nex", t5_methods, sizeof(struct adapter) }; /* T5 port (cxl) interface */ static driver_t cxl_driver = { "cxl", cxgbe_methods, sizeof(struct port_info) }; /* T5 VI (vcxl) interface */ static driver_t vcxl_driver = { "vcxl", vcxgbe_methods, sizeof(struct vi_info) }; /* T6 bus driver interface */ static int t6_probe(device_t); static device_method_t t6_methods[] = { DEVMETHOD(device_probe, t6_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(bus_child_location_str, t4_child_location_str), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t6_driver = { "t6nex", t6_methods, sizeof(struct adapter) }; /* T6 port (cc) interface */ static driver_t 
cc_driver = { "cc", cxgbe_methods, sizeof(struct port_info) }; /* T6 VI (vcc) interface */ static driver_t vcc_driver = { "vcc", vcxgbe_methods, sizeof(struct vi_info) }; /* ifnet interface */ static void cxgbe_init(void *); static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t); static int cxgbe_transmit(struct ifnet *, struct mbuf *); static void cxgbe_qflush(struct ifnet *); MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services"); /* * Correct lock order when you need to acquire multiple locks is t4_list_lock, * then ADAPTER_LOCK, then t4_uld_list_lock. */ static struct sx t4_list_lock; SLIST_HEAD(, adapter) t4_list; #ifdef TCP_OFFLOAD static struct sx t4_uld_list_lock; SLIST_HEAD(, uld_info) t4_uld_list; #endif /* * Tunables. See tweak_tunables() too. * * Each tunable is set to a default value here if it's known at compile-time. * Otherwise it is set to -n as an indication to tweak_tunables() that it should * provide a reasonable default (up to n) when the driver is loaded. * * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to * T5 are under hw.cxl. */ SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe(4) parameters"); SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD, 0, "cxgbe(4) T5+ parameters"); SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD, 0, "cxgbe(4) TOE parameters"); /* * Number of queues for tx and rx, NIC and offload. */ #define NTXQ 16 int t4_ntxq = -NTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0, "Number of TX queues per port"); TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq); /* Old name, undocumented */ #define NRXQ 8 int t4_nrxq = -NRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0, "Number of RX queues per port"); TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq); /* Old name, undocumented */ #define NTXQ_VI 1 static int t4_ntxq_vi = -NTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0, "Number of TX queues per VI"); #define NRXQ_VI 1 static int t4_nrxq_vi = -NRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0, "Number of RX queues per VI"); static int t4_rsrv_noflowq = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq, 0, "Reserve TX queue 0 of each VI for non-flowid packets"); #ifdef TCP_OFFLOAD #define NOFLDTXQ 8 static int t4_nofldtxq = -NOFLDTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0, "Number of offload TX queues per port"); #define NOFLDRXQ 2 static int t4_nofldrxq = -NOFLDRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0, "Number of offload RX queues per port"); #define NOFLDTXQ_VI 1 static int t4_nofldtxq_vi = -NOFLDTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0, "Number of offload TX queues per VI"); #define NOFLDRXQ_VI 1 static int t4_nofldrxq_vi = -NOFLDRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0, "Number of offload RX queues per VI"); #define TMR_IDX_OFLD 1 int t4_tmr_idx_ofld = TMR_IDX_OFLD; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN, &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues"); #define PKTC_IDX_OFLD (-1) int t4_pktc_idx_ofld = PKTC_IDX_OFLD; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN, &t4_pktc_idx_ofld, 0, "Holdoff packet counter index for offload queues"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_keepalive_idle = 0;
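/*
 * Editorial illustration (a minimal sketch of the "-n" tunable convention
 * described in the comment above, not the driver's actual tweak_tunables()
 * logic; the names here are hypothetical): a positive tunable was set
 * explicitly by the administrator and is used as-is, while a negative
 * default tells the driver to pick a sensible value at load time, capped
 * at n.
 */
static inline int
example_resolve_tunable(int tunable, int computed)
{
	if (tunable > 0)
		return (tunable);	/* set explicitly, use as-is */
	/* Negative default: use the computed value, but cap it at n. */
	return (computed < -tunable ? computed : -tunable);
}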
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN, &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_keepalive_interval = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN, &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)"); /* 0 means chip/fw default, non-zero number is # of keepalives before abort */ static int t4_toe_keepalive_count = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN, &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_rexmt_min = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN, &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_rexmt_max = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN, &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)"); /* 0 means chip/fw default, non-zero number is # of rexmt before abort */ static int t4_toe_rexmt_count = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN, &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort"); /* -1 means chip/fw default, other values are raw backoff values to use */ static int t4_toe_rexmt_backoff[16] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff, CTLFLAG_RD, 0, "cxgbe(4) TOE retransmit backoff values"); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[0], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[1], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[2], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[3], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[4], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[5], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[6], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[7], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[8], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[9], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[10], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[11], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[12], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[13], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[14], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[15], 0, ""); #endif #ifdef DEV_NETMAP #define NNMTXQ_VI 2 static int t4_nnmtxq_vi = -NNMTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0, "Number of netmap TX queues per VI"); #define NNMRXQ_VI 2 static int t4_nnmrxq_vi = -NNMRXQ_VI; 
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0, "Number of netmap RX queues per VI"); #endif /* * Holdoff parameters for ports. */ #define TMR_IDX 1 int t4_tmr_idx = TMR_IDX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx, 0, "Holdoff timer index"); TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx); /* Old name */ #define PKTC_IDX (-1) int t4_pktc_idx = PKTC_IDX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx, 0, "Holdoff packet counter index"); TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx); /* Old name */ /* * Size (# of entries) of each tx and rx queue. */ unsigned int t4_qsize_txq = TX_EQ_QSIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0, "Number of descriptors in each TX queue"); unsigned int t4_qsize_rxq = RX_IQ_QSIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0, "Number of descriptors in each RX queue"); /* * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively). */ int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types, 0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)"); /* * Configuration file. All the _CF names here are special. */ #define DEFAULT_CF "default" #define BUILTIN_CF "built-in" #define FLASH_CF "flash" #define UWIRE_CF "uwire" #define FPGA_CF "fpga" static char t4_cfg_file[32] = DEFAULT_CF; SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file, sizeof(t4_cfg_file), "Firmware configuration file"); /* * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively). * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them. * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water * mark or when signalled to do so, 0 to never emit PAUSE. * pause_autoneg = 1 means PAUSE will be negotiated if possible and the * negotiated settings will override rx_pause/tx_pause. * Otherwise rx_pause/tx_pause are applied forcibly. */ static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG; SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN, &t4_pause_settings, 0, "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); /* * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively). * -1 to run with the firmware default. Same as FEC_AUTO (bit 5) * 0 to disable FEC. */ static int t4_fec = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0, "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); /* * Link autonegotiation. * -1 to run with the firmware default. * 0 to disable. * 1 to enable. */ static int t4_autoneg = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0, "Link autonegotiation"); /* * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed, * encouraged respectively). '-n' is the same as 'n' except the firmware * version used in the checks is read from the firmware bundled with the driver. */ static int t4_fw_install = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0, "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)"); /* * ASIC features that will be used. Disable the ones you don't want so that the * chip resources aren't wasted on features that will not be used. 
*/ static int t4_nbmcaps_allowed = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN, &t4_nbmcaps_allowed, 0, "Default NBM capabilities"); static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */ SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN, &t4_linkcaps_allowed, 0, "Default link capabilities"); static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS | FW_CAPS_CONFIG_SWITCH_EGRESS; SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN, &t4_switchcaps_allowed, 0, "Default switch capabilities"); #ifdef RATELIMIT static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC | FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD; #else static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC | FW_CAPS_CONFIG_NIC_HASHFILTER; #endif SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN, &t4_niccaps_allowed, 0, "Default NIC capabilities"); static int t4_toecaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN, &t4_toecaps_allowed, 0, "Default TCP offload capabilities"); static int t4_rdmacaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN, &t4_rdmacaps_allowed, 0, "Default RDMA capabilities"); static int t4_cryptocaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN, &t4_cryptocaps_allowed, 0, "Default crypto capabilities"); static int t4_iscsicaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN, &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities"); static int t4_fcoecaps_allowed = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN, &t4_fcoecaps_allowed, 0, "Default FCoE capabilities"); static int t5_write_combine = 0; SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine, 0, "Use WC instead of UC for BAR2"); static int t4_num_vis = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0, "Number of VIs per port"); /* * PCIe Relaxed Ordering. * -1: driver should figure out a good value. * 0: disable RO. * 1: enable RO. * 2: leave RO alone. */ static int pcie_relaxed_ordering = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN, &pcie_relaxed_ordering, 0, "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone"); static int t4_panic_on_fatal_err = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RDTUN, &t4_panic_on_fatal_err, 0, "panic on fatal errors"); #ifdef TCP_OFFLOAD /* * TOE tunables. */ static int t4_cop_managed_offloading = 0; TUNABLE_INT("hw.cxgbe.cop_managed_offloading", &t4_cop_managed_offloading); #endif /* Functions used by VIs to obtain unique MAC addresses for each VI. */ static int vi_mac_funcs[] = { FW_VI_FUNC_ETH, FW_VI_FUNC_OFLD, FW_VI_FUNC_IWARP, FW_VI_FUNC_OPENISCSI, FW_VI_FUNC_OPENFCOE, FW_VI_FUNC_FOISCSI, FW_VI_FUNC_FOFCOE, }; struct intrs_and_queues { uint16_t intr_type; /* INTx, MSI, or MSI-X */ uint16_t num_vis; /* number of VIs for each port */ uint16_t nirq; /* Total # of vectors */ uint16_t ntxq; /* # of NIC txq's for each port */ uint16_t nrxq; /* # of NIC rxq's for each port */ uint16_t nofldtxq; /* # of TOE txq's for each port */ uint16_t nofldrxq; /* # of TOE rxq's for each port */ /* The vcxgbe/vcxl interfaces use these and not the ones above. 
*/ uint16_t ntxq_vi; /* # of NIC txq's */ uint16_t nrxq_vi; /* # of NIC rxq's */ uint16_t nofldtxq_vi; /* # of TOE txq's */ uint16_t nofldrxq_vi; /* # of TOE rxq's */ uint16_t nnmtxq_vi; /* # of netmap txq's */ uint16_t nnmrxq_vi; /* # of netmap rxq's */ }; static void setup_memwin(struct adapter *); static void position_memwin(struct adapter *, int, uint32_t); static int validate_mem_range(struct adapter *, uint32_t, uint32_t); static int fwmtype_to_hwmtype(int); static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t, uint32_t *); static int fixup_devlog_params(struct adapter *); static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *); static int contact_firmware(struct adapter *); static int partition_resources(struct adapter *); static int get_params__pre_init(struct adapter *); static int set_params__pre_init(struct adapter *); static int get_params__post_init(struct adapter *); static int set_params__post_init(struct adapter *); static void t4_set_desc(struct adapter *); static bool fixed_ifmedia(struct port_info *); static void build_medialist(struct port_info *); static void init_link_config(struct port_info *); static int fixup_link_config(struct port_info *); static int apply_link_config(struct port_info *); static int cxgbe_init_synchronized(struct vi_info *); static int cxgbe_uninit_synchronized(struct vi_info *); static void quiesce_txq(struct adapter *, struct sge_txq *); static void quiesce_wrq(struct adapter *, struct sge_wrq *); static void quiesce_iq(struct adapter *, struct sge_iq *); static void quiesce_fl(struct adapter *, struct sge_fl *); static int t4_alloc_irq(struct adapter *, struct irq *, int rid, driver_intr_t *, void *, char *); static int t4_free_irq(struct adapter *, struct irq *); static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *); static void vi_refresh_stats(struct adapter *, struct vi_info *); static void cxgbe_refresh_stats(struct adapter *, struct port_info *); static void cxgbe_tick(void *); static void cxgbe_sysctls(struct port_info *); static int sysctl_int_array(SYSCTL_HANDLER_ARGS); static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS); static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS); static int sysctl_btphy(SYSCTL_HANDLER_ARGS); static int sysctl_noflowq(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS); static int sysctl_fec(SYSCTL_HANDLER_ARGS); static int sysctl_autoneg(SYSCTL_HANDLER_ARGS); static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); static int sysctl_temperature(SYSCTL_HANDLER_ARGS); static int sysctl_loadavg(SYSCTL_HANDLER_ARGS); static int sysctl_cctrl(SYSCTL_HANDLER_ARGS); static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS); static int sysctl_cim_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS); static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS); static int sysctl_devlog(SYSCTL_HANDLER_ARGS); static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS); static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS); static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS); static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS); static int sysctl_meminfo(SYSCTL_HANDLER_ARGS); static int 
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS); static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS); static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS); static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS); static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tids(SYSCTL_HANDLER_ARGS); static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS); static int sysctl_tp_la(SYSCTL_HANDLER_ARGS); static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS); static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS); static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS); static int sysctl_cpus(SYSCTL_HANDLER_ARGS); #ifdef TCP_OFFLOAD static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS); static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS); static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS); static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS); static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS); static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS); #endif static int get_sge_context(struct adapter *, struct t4_sge_context *); static int load_fw(struct adapter *, struct t4_data *); static int load_cfg(struct adapter *, struct t4_data *); static int load_boot(struct adapter *, struct t4_bootrom *); static int load_bootcfg(struct adapter *, struct t4_data *); static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *); static void free_offload_policy(struct t4_offload_policy *); static int set_offload_policy(struct adapter *, struct t4_offload_policy *); static int read_card_mem(struct adapter *, int, struct t4_mem_range *); static int read_i2c(struct adapter *, struct t4_i2c_data *); #ifdef TCP_OFFLOAD static int toe_capability(struct vi_info *, int); #endif static int mod_event(module_t, int, void *); static int notify_siblings(device_t, int); struct { uint16_t device; char *desc; } t4_pciids[] = { {0xa000, "Chelsio Terminator 4 FPGA"}, {0x4400, "Chelsio T440-dbg"}, {0x4401, "Chelsio T420-CR"}, {0x4402, "Chelsio T422-CR"}, {0x4403, "Chelsio T440-CR"}, {0x4404, "Chelsio T420-BCH"}, {0x4405, "Chelsio T440-BCH"}, {0x4406, "Chelsio T440-CH"}, {0x4407, "Chelsio T420-SO"}, {0x4408, "Chelsio T420-CX"}, {0x4409, "Chelsio T420-BT"}, {0x440a, "Chelsio T404-BT"}, {0x440e, "Chelsio T440-LP-CR"}, }, t5_pciids[] = { {0xb000, "Chelsio Terminator 5 FPGA"}, {0x5400, "Chelsio T580-dbg"}, {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */ {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */ {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */ {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */ {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */ {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */ {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */ {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */ {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */ {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */ {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */ {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */ {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */ {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */ {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */ {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */ {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */ /* Custom */ {0x5483, "Custom T540-CR"}, {0x5484, "Custom T540-BT"}, }, t6_pciids[] = { {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */ {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */ {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */ 
{0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */ {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */ {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */ {0x6405, "Chelsio T6225-OCP-SO"}, /* 2 x 10/25G, nomem */ {0x6406, "Chelsio T62100-OCP-SO"}, /* 2 x 40/50/100G, nomem */ {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */ {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */ {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */ {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */ {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */ {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */ {0x6414, "Chelsio T61100-OCP-SO"}, /* 1 x 40/50/100G, nomem */ {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */ /* Custom */ {0x6480, "Custom T6225-CR"}, {0x6481, "Custom T62100-CR"}, {0x6482, "Custom T6225-CR"}, {0x6483, "Custom T62100-CR"}, {0x6484, "Custom T64100-CR"}, {0x6485, "Custom T6240-SO"}, {0x6486, "Custom T6225-SO-CR"}, {0x6487, "Custom T6225-CR"}, }; #ifdef TCP_OFFLOAD /* * service_iq_fl() has an iq and needs the fl. Offset of fl from the iq should * be exactly the same for both rxq and ofld_rxq. */ CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq)); CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl)); #endif CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE); static int t4_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); uint8_t f = pci_get_function(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); /* Attach only to PF0 of the FPGA */ if (d == 0xa000 && f != 0) return (ENXIO); for (i = 0; i < nitems(t4_pciids); i++) { if (d == t4_pciids[i].device) { device_set_desc(dev, t4_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int t5_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); uint8_t f = pci_get_function(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); /* Attach only to PF0 of the FPGA */ if (d == 0xb000 && f != 0) return (ENXIO); for (i = 0; i < nitems(t5_pciids); i++) { if (d == t5_pciids[i].device) { device_set_desc(dev, t5_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int t6_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); for (i = 0; i < nitems(t6_pciids); i++) { if (d == t6_pciids[i].device) { device_set_desc(dev, t6_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void t5_attribute_workaround(device_t dev) { device_t root_port; uint32_t v; /* * The T5 chips do not properly echo the No Snoop and Relaxed * Ordering attributes when replying to a TLP from a Root * Port. As a workaround, find the parent Root Port and * disable No Snoop and Relaxed Ordering. Note that this * affects all devices under this root port. 
*/ root_port = pci_find_pcie_root_port(dev); if (root_port == NULL) { device_printf(dev, "Unable to find parent root port\n"); return; } v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL, PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2); if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) != 0) device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n", device_get_nameunit(root_port)); } static const struct devnames devnames[] = { { .nexus_name = "t4nex", .ifnet_name = "cxgbe", .vi_ifnet_name = "vcxgbe", .pf03_drv_name = "t4iov", .vf_nexus_name = "t4vf", .vf_ifnet_name = "cxgbev" }, { .nexus_name = "t5nex", .ifnet_name = "cxl", .vi_ifnet_name = "vcxl", .pf03_drv_name = "t5iov", .vf_nexus_name = "t5vf", .vf_ifnet_name = "cxlv" }, { .nexus_name = "t6nex", .ifnet_name = "cc", .vi_ifnet_name = "vcc", .pf03_drv_name = "t6iov", .vf_nexus_name = "t6vf", .vf_ifnet_name = "ccv" } }; void t4_init_devnames(struct adapter *sc) { int id; id = chip_id(sc); if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames)) sc->names = &devnames[id - CHELSIO_T4]; else { device_printf(sc->dev, "chip id %d is not supported.\n", id); sc->names = NULL; } } static int t4_ifnet_unit(struct adapter *sc, struct port_info *pi) { const char *parent, *name; long value; int line, unit; line = 0; parent = device_get_nameunit(sc->dev); name = sc->names->ifnet_name; while (resource_find_dev(&line, name, &unit, "at", parent) == 0) { if (resource_long_value(name, unit, "port", &value) == 0 && value == pi->port_id) return (unit); } return (-1); } static int t4_attach(device_t dev) { struct adapter *sc; int rc = 0, i, j, rqidx, tqidx, nports; struct make_dev_args mda; struct intrs_and_queues iaq; struct sge *s; uint32_t *buf; #ifdef TCP_OFFLOAD int ofld_rqidx, ofld_tqidx; #endif #ifdef DEV_NETMAP int nm_rqidx, nm_tqidx; #endif int num_vis; sc = device_get_softc(dev); sc->dev = dev; TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags); if ((pci_get_device(dev) & 0xff00) == 0x5400) t5_attribute_workaround(dev); pci_enable_busmaster(dev); if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { uint32_t v; pci_set_max_read_req(dev, 4096); v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2); sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5); if (pcie_relaxed_ordering == 0 && (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) { v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE; pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); } else if (pcie_relaxed_ordering == 1 && (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) { v |= PCIEM_CTL_RELAXED_ORD_ENABLE; pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); } } sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS); sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL); sc->traceq = -1; mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF); snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer", device_get_nameunit(dev)); snprintf(sc->lockname, sizeof(sc->lockname), "%s", device_get_nameunit(dev)); mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); t4_add_adapter(sc); mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF); TAILQ_INIT(&sc->sfl); callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0); mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF); sc->policy = NULL; rw_init(&sc->policy_lock, "connection offload policy"); rc = t4_map_bars_0_and_4(sc); if (rc != 0) goto done; /* error message displayed already */ memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); /* Prepare the adapter for operation. 
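* t4_prep_adapter() identifies the chip and reads the basic adapter
* parameters; the page-sized buffer below is believed to be scratch
* space needed only for the duration of that call, which is why it is
* freed immediately afterwards.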
*/ buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK); rc = -t4_prep_adapter(sc, buf); free(buf, M_CXGBE); if (rc != 0) { device_printf(dev, "failed to prepare adapter: %d.\n", rc); goto done; } /* * This is the real PF# to which we're attaching. Works from within PCI * passthrough environments too, where pci_get_function() could return a * different PF# depending on the passthrough configuration. We need to * use the real PF# in all our communication with the firmware. */ j = t4_read_reg(sc, A_PL_WHOAMI); sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j); sc->mbox = sc->pf; t4_init_devnames(sc); if (sc->names == NULL) { rc = ENOTSUP; goto done; /* error message displayed already */ } /* * Do this really early, with the memory windows set up even before the * character device. The userland tool's register i/o and mem read * will work even in "recovery mode". */ setup_memwin(sc); if (t4_init_devlog_params(sc, 0) == 0) fixup_devlog_params(sc); make_dev_args_init(&mda); mda.mda_devsw = &t4_cdevsw; mda.mda_uid = UID_ROOT; mda.mda_gid = GID_WHEEL; mda.mda_mode = 0600; mda.mda_si_drv1 = sc; rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev)); if (rc != 0) device_printf(dev, "failed to create nexus char device: %d.\n", rc); /* Go no further if recovery mode has been requested. */ if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { device_printf(dev, "recovery mode.\n"); goto done; } #if defined(__i386__) if ((cpu_feature & CPUID_CX8) == 0) { device_printf(dev, "64 bit atomics not available.\n"); rc = ENOTSUP; goto done; } #endif /* Contact the firmware and try to become the master driver. */ rc = contact_firmware(sc); if (rc != 0) goto done; /* error message displayed already */ MPASS(sc->flags & FW_OK); rc = get_params__pre_init(sc); if (rc != 0) goto done; /* error message displayed already */ if (sc->flags & MASTER_PF) { rc = partition_resources(sc); if (rc != 0) goto done; /* error message displayed already */ t4_intr_clear(sc); } rc = get_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = set_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_map_bar_2(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_create_dma_tag(sc); if (rc != 0) goto done; /* error message displayed already */ /* * First pass over all the ports - allocate VIs and initialize some * basic parameters like mac address, port type, etc. */ for_each_port(sc, i) { struct port_info *pi; pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); sc->port[i] = pi; /* These must be set before t4_port_init */ pi->adapter = sc; pi->port_id = i; /* * XXX: vi[0] is special so we can't delay this allocation until * pi->nvi's final value is known. */ pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE, M_ZERO | M_WAITOK); /* * Allocate the "main" VI and initialize parameters * like mac addr. */ rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); if (rc != 0) { device_printf(dev, "unable to initialize port %d: %d\n", i, rc); free(pi->vi, M_CXGBE); free(pi, M_CXGBE); sc->port[i] = NULL; goto done; } snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", device_get_nameunit(dev), i); mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); sc->chan_map[pi->tx_chan] = i; /* All VIs on this port share this media. 
*/ ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, cxgbe_media_status); PORT_LOCK(pi); init_link_config(pi); fixup_link_config(pi); build_medialist(pi); if (fixed_ifmedia(pi)) pi->flags |= FIXED_IFMEDIA; PORT_UNLOCK(pi); pi->dev = device_add_child(dev, sc->names->ifnet_name, t4_ifnet_unit(sc, pi)); if (pi->dev == NULL) { device_printf(dev, "failed to add device for port %d.\n", i); rc = ENXIO; goto done; } pi->vi[0].dev = pi->dev; device_set_softc(pi->dev, pi); } /* * Interrupt type, # of interrupts, # of rx/tx queues, etc. */ nports = sc->params.nports; rc = cfg_itype_and_nqueues(sc, &iaq); if (rc != 0) goto done; /* error message displayed already */ num_vis = iaq.num_vis; sc->intr_type = iaq.intr_type; sc->intr_count = iaq.nirq; s = &sc->sge; s->nrxq = nports * iaq.nrxq; s->ntxq = nports * iaq.ntxq; if (num_vis > 1) { s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi; s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi; } s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ s->neq += nports; /* ctrl queues: 1 per port */ s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ #ifdef TCP_OFFLOAD if (is_offload(sc)) { s->nofldrxq = nports * iaq.nofldrxq; s->nofldtxq = nports * iaq.nofldtxq; if (num_vis > 1) { s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi; s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi; } s->neq += s->nofldtxq + s->nofldrxq; s->niq += s->nofldrxq; s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq), M_CXGBE, M_ZERO | M_WAITOK); } #endif #ifdef DEV_NETMAP if (num_vis > 1) { s->nnmrxq = nports * (num_vis - 1) * iaq.nnmrxq_vi; s->nnmtxq = nports * (num_vis - 1) * iaq.nnmtxq_vi; } s->neq += s->nnmtxq + s->nnmrxq; s->niq += s->nnmrxq; s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq), M_CXGBE, M_ZERO | M_WAITOK); #endif s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE, M_ZERO | M_WAITOK); s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, M_ZERO | M_WAITOK); s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE, M_ZERO | M_WAITOK); s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE, M_ZERO | M_WAITOK); sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, M_ZERO | M_WAITOK); t4_init_l2t(sc, M_WAITOK); t4_init_smt(sc, M_WAITOK); t4_init_tx_sched(sc); #ifdef INET6 t4_init_clip_table(sc); #endif if (sc->vres.key.size != 0) sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start, sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK); /* * Second pass over the ports. This time we know the number of rx and * tx queues that each port should get. */ rqidx = tqidx = 0; #ifdef TCP_OFFLOAD ofld_rqidx = ofld_tqidx = 0; #endif #ifdef DEV_NETMAP nm_rqidx = nm_tqidx = 0; #endif for_each_port(sc, i) { struct port_info *pi = sc->port[i]; struct vi_info *vi; if (pi == NULL) continue; pi->nvi = num_vis; for_each_vi(pi, j, vi) { vi->pi = pi; vi->qsize_rxq = t4_qsize_rxq; vi->qsize_txq = t4_qsize_txq; vi->first_rxq = rqidx; vi->first_txq = tqidx; vi->tmr_idx = t4_tmr_idx; vi->pktc_idx = t4_pktc_idx; vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi; vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi; rqidx += vi->nrxq; tqidx += vi->ntxq; if (j == 0 && vi->ntxq > 1) vi->rsrv_noflowq = t4_rsrv_noflowq ? 
1 : 0; else vi->rsrv_noflowq = 0; #ifdef TCP_OFFLOAD vi->ofld_tmr_idx = t4_tmr_idx_ofld; vi->ofld_pktc_idx = t4_pktc_idx_ofld; vi->first_ofld_rxq = ofld_rqidx; vi->first_ofld_txq = ofld_tqidx; vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi; vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi; ofld_rqidx += vi->nofldrxq; ofld_tqidx += vi->nofldtxq; #endif #ifdef DEV_NETMAP if (j > 0) { vi->first_nm_rxq = nm_rqidx; vi->first_nm_txq = nm_tqidx; vi->nnmrxq = iaq.nnmrxq_vi; vi->nnmtxq = iaq.nnmtxq_vi; nm_rqidx += vi->nnmrxq; nm_tqidx += vi->nnmtxq; } #endif } } rc = t4_setup_intr_handlers(sc); if (rc != 0) { device_printf(dev, "failed to setup interrupt handlers: %d\n", rc); goto done; } rc = bus_generic_probe(dev); if (rc != 0) { device_printf(dev, "failed to probe child drivers: %d\n", rc); goto done; } /* * Ensure thread-safe mailbox access (in debug builds). * * So far this was the only thread accessing the mailbox but various * ifnets and sysctls are about to be created and their handlers/ioctls * will access the mailbox from different threads. */ sc->flags |= CHK_MBOX_ACCESS; rc = bus_generic_attach(dev); if (rc != 0) { device_printf(dev, "failed to attach all child ports: %d\n", rc); goto done; } device_printf(dev, "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n", sc->params.pci.speed, sc->params.pci.width, sc->params.nports, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq); t4_set_desc(sc); notify_siblings(dev, 0); done: if (rc != 0 && sc->cdev) { /* cdev was created and so cxgbetool works; recover that way. */ device_printf(dev, "error during attach, adapter is now in recovery mode.\n"); rc = 0; } if (rc != 0) t4_detach_common(dev); else t4_sysctls(sc); return (rc); } static int t4_child_location_str(device_t bus, device_t dev, char *buf, size_t buflen) { struct adapter *sc; struct port_info *pi; int i; sc = device_get_softc(bus); buf[0] = '\0'; for_each_port(sc, i) { pi = sc->port[i]; if (pi != NULL && pi->dev == dev) { snprintf(buf, buflen, "port=%d", pi->port_id); break; } } return (0); } static int t4_ready(device_t dev) { struct adapter *sc; sc = device_get_softc(dev); if (sc->flags & FW_OK) return (0); return (ENXIO); } static int t4_read_port_device(device_t dev, int port, device_t *child) { struct adapter *sc; struct port_info *pi; sc = device_get_softc(dev); if (port < 0 || port >= MAX_NPORTS) return (EINVAL); pi = sc->port[port]; if (pi == NULL || pi->dev == NULL) return (ENXIO); *child = pi->dev; return (0); } static int notify_siblings(device_t dev, int detaching) { device_t sibling; int error, i; error = 0; for (i = 0; i < PCI_FUNCMAX; i++) { if (i == pci_get_function(dev)) continue; sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), i); if (sibling == NULL || !device_is_attached(sibling)) continue; if (detaching) error = T4_DETACH_CHILD(sibling); else (void)T4_ATTACH_CHILD(sibling); if (error) break; } return (error); } /* * Idempotent */ static int t4_detach(device_t dev) { struct adapter *sc; int rc; sc = device_get_softc(dev); rc = notify_siblings(dev, 1); if (rc) { device_printf(dev, "failed to detach sibling devices: %d\n", rc); return (rc); } return (t4_detach_common(dev)); } int t4_detach_common(device_t dev) { struct adapter *sc; struct port_info *pi; int i, rc; sc = device_get_softc(dev); if (sc->cdev) { destroy_dev(sc->cdev); sc->cdev = NULL; } sc->flags &= ~CHK_MBOX_ACCESS; if (sc->flags & 
FULL_INIT_DONE) { if (!(sc->flags & IS_VF)) t4_intr_disable(sc); } if (device_is_attached(dev)) { rc = bus_generic_detach(dev); if (rc) { device_printf(dev, "failed to detach child devices: %d\n", rc); return (rc); } } for (i = 0; i < sc->intr_count; i++) t4_free_irq(sc, &sc->irq[i]); if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) t4_free_tx_sched(sc); for (i = 0; i < MAX_NPORTS; i++) { pi = sc->port[i]; if (pi) { t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); if (pi->dev) device_delete_child(dev, pi->dev); mtx_destroy(&pi->pi_lock); free(pi->vi, M_CXGBE); free(pi, M_CXGBE); } } device_delete_children(dev); if (sc->flags & FULL_INIT_DONE) adapter_full_uninit(sc); if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) t4_fw_bye(sc, sc->mbox); if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) pci_release_msi(dev); if (sc->regs_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, sc->regs_res); if (sc->udbs_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, sc->udbs_res); if (sc->msix_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, sc->msix_res); if (sc->l2t) t4_free_l2t(sc->l2t); if (sc->key_map) vmem_destroy(sc->key_map); if (sc->smt) t4_free_smt(sc->smt); #ifdef INET6 t4_destroy_clip_table(sc); #endif #ifdef TCP_OFFLOAD free(sc->sge.ofld_rxq, M_CXGBE); free(sc->sge.ofld_txq, M_CXGBE); #endif #ifdef DEV_NETMAP free(sc->sge.nm_rxq, M_CXGBE); free(sc->sge.nm_txq, M_CXGBE); #endif free(sc->irq, M_CXGBE); free(sc->sge.rxq, M_CXGBE); free(sc->sge.txq, M_CXGBE); free(sc->sge.ctrlq, M_CXGBE); free(sc->sge.iqmap, M_CXGBE); free(sc->sge.eqmap, M_CXGBE); free(sc->tids.ftid_tab, M_CXGBE); free(sc->tids.hpftid_tab, M_CXGBE); free_hftid_hash(&sc->tids); free(sc->tids.atid_tab, M_CXGBE); free(sc->tids.tid_tab, M_CXGBE); free(sc->tt.tls_rx_ports, M_CXGBE); t4_destroy_dma_tag(sc); if (mtx_initialized(&sc->sc_lock)) { sx_xlock(&t4_list_lock); SLIST_REMOVE(&t4_list, sc, adapter, link); sx_xunlock(&t4_list_lock); mtx_destroy(&sc->sc_lock); } callout_drain(&sc->sfl_callout); if (mtx_initialized(&sc->tids.ftid_lock)) { mtx_destroy(&sc->tids.ftid_lock); cv_destroy(&sc->tids.ftid_cv); } if (mtx_initialized(&sc->tids.atid_lock)) mtx_destroy(&sc->tids.atid_lock); if (mtx_initialized(&sc->sfl_lock)) mtx_destroy(&sc->sfl_lock); if (mtx_initialized(&sc->ifp_lock)) mtx_destroy(&sc->ifp_lock); if (mtx_initialized(&sc->reg_lock)) mtx_destroy(&sc->reg_lock); if (rw_initialized(&sc->policy_lock)) { rw_destroy(&sc->policy_lock); #ifdef TCP_OFFLOAD if (sc->policy != NULL) free_offload_policy(sc->policy); #endif } for (i = 0; i < NUM_MEMWIN; i++) { struct memwin *mw = &sc->memwin[i]; if (rw_initialized(&mw->mw_lock)) rw_destroy(&mw->mw_lock); } bzero(sc, sizeof(*sc)); return (0); } static int cxgbe_probe(device_t dev) { char buf[128]; struct port_info *pi = device_get_softc(dev); snprintf(buf, sizeof(buf), "port %d", pi->port_id); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS) #define T4_CAP_ENABLE (T4_CAP) static int cxgbe_vi_attach(device_t dev, struct vi_info *vi) { struct ifnet *ifp; struct sbuf *sb; vi->xact_addr_filt = -1; callout_init(&vi->tick, 1); /* Allocate an ifnet and set it up */ ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Cannot allocate ifnet\n"); return (ENOMEM); } vi->ifp = ifp; ifp->if_softc = vi; if_initname(ifp, 
device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = cxgbe_init; ifp->if_ioctl = cxgbe_ioctl; ifp->if_transmit = cxgbe_transmit; ifp->if_qflush = cxgbe_qflush; ifp->if_get_counter = cxgbe_get_counter; ifp->if_capabilities = T4_CAP; #ifdef TCP_OFFLOAD if (vi->nofldrxq != 0) ifp->if_capabilities |= IFCAP_TOE; #endif ifp->if_capenable = T4_CAP_ENABLE; ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6; ifp->if_hw_tsomax = IP_MAXPACKET; ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO; #ifdef RATELIMIT if (is_ethoffload(vi->pi->adapter) && vi->nofldtxq != 0) ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_EO_TSO; #endif ifp->if_hw_tsomaxsegsize = 65536; ether_ifattach(ifp, vi->hw_addr); #ifdef DEV_NETMAP if (vi->nnmrxq != 0) cxgbe_nm_attach(vi); #endif sb = sbuf_new_auto(); sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); #ifdef TCP_OFFLOAD if (ifp->if_capabilities & IFCAP_TOE) sbuf_printf(sb, "; %d txq, %d rxq (TOE)", vi->nofldtxq, vi->nofldrxq); #endif #ifdef DEV_NETMAP if (ifp->if_capabilities & IFCAP_NETMAP) sbuf_printf(sb, "; %d txq, %d rxq (netmap)", vi->nnmtxq, vi->nnmrxq); #endif sbuf_finish(sb); device_printf(dev, "%s\n", sbuf_data(sb)); sbuf_delete(sb); vi_sysctls(vi); return (0); } static int cxgbe_attach(device_t dev) { struct port_info *pi = device_get_softc(dev); struct adapter *sc = pi->adapter; struct vi_info *vi; int i, rc; callout_init_mtx(&pi->tick, &pi->pi_lock, 0); rc = cxgbe_vi_attach(dev, &pi->vi[0]); if (rc) return (rc); for_each_vi(pi, i, vi) { if (i == 0) continue; vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1); if (vi->dev == NULL) { device_printf(dev, "failed to add VI %d\n", i); continue; } device_set_softc(vi->dev, vi); } cxgbe_sysctls(pi); bus_generic_attach(dev); return (0); } static void cxgbe_vi_detach(struct vi_info *vi) { struct ifnet *ifp = vi->ifp; ether_ifdetach(ifp); /* Let detach proceed even if these fail. */ #ifdef DEV_NETMAP if (ifp->if_capabilities & IFCAP_NETMAP) cxgbe_nm_detach(vi); #endif cxgbe_uninit_synchronized(vi); callout_drain(&vi->tick); vi_full_uninit(vi); if_free(vi->ifp); vi->ifp = NULL; } static int cxgbe_detach(device_t dev) { struct port_info *pi = device_get_softc(dev); struct adapter *sc = pi->adapter; int rc; /* Detach the extra VIs first. 
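* These are the extra-VI children (vcxgbe/vcxl/vcc) that cxgbe_attach
* added with device_add_child; the main VI is torn down by hand below.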
*/ rc = bus_generic_detach(dev); if (rc) return (rc); device_delete_children(dev); doom_vi(sc, &pi->vi[0]); if (pi->flags & HAS_TRACEQ) { sc->traceq = -1; /* cloner should not create ifnet */ t4_tracer_port_detach(sc); } cxgbe_vi_detach(&pi->vi[0]); callout_drain(&pi->tick); ifmedia_removeall(&pi->media); end_synchronized_op(sc, 0); return (0); } static void cxgbe_init(void *arg) { struct vi_info *vi = arg; struct adapter *sc = vi->pi->adapter; if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0) return; cxgbe_init_synchronized(vi); end_synchronized_op(sc, 0); } static int cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) { int rc = 0, mtu, can_sleep, if_flags, if_drv_flags, vi_if_flags; struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifreq *ifr = (struct ifreq *)data; uint32_t mask; switch (cmd) { case SIOCSIFMTU: mtu = ifr->ifr_mtu; if (mtu < ETHERMIN || mtu > MAX_MTU) return (EINVAL); rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); if (rc) return (rc); ifp->if_mtu = mtu; if (vi->flags & VI_INIT_DONE) { t4_update_fl_bufsize(ifp); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_MTU); } end_synchronized_op(sc, 0); break; case SIOCSIFFLAGS: /* * Decide what to do, with the port lock held. */ PORT_LOCK(pi); if_flags = ifp->if_flags; if_drv_flags = ifp->if_drv_flags; vi_if_flags = vi->if_flags; if (if_flags & IFF_UP && if_drv_flags & IFF_DRV_RUNNING && (vi_if_flags ^ if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { can_sleep = 0; } else { can_sleep = 1; } PORT_UNLOCK(pi); /* * ifp/vi flags may change here but we'll just do what our local * copy of the flags indicates and then update the driver owned * ifp/vi flags (in a synch-op and with the port lock held) to * reflect what we did. */ rc = begin_synchronized_op(sc, vi, can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg"); if (rc) { if_printf(ifp, "%ssleepable synch operation failed: %d." " if_flags 0x%08x, if_drv_flags 0x%08x\n", can_sleep ? "" : "non-", rc, if_flags, if_drv_flags); return (rc); } if (if_flags & IFF_UP) { if (if_drv_flags & IFF_DRV_RUNNING) { if ((if_flags ^ vi_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { MPASS(can_sleep == 0); rc = update_mac_settings(ifp, XGMAC_PROMISC | XGMAC_ALLMULTI); } } else { MPASS(can_sleep == 1); rc = cxgbe_init_synchronized(vi); } } else if (if_drv_flags & IFF_DRV_RUNNING) { MPASS(can_sleep == 1); rc = cxgbe_uninit_synchronized(vi); } PORT_LOCK(pi); vi->if_flags = if_flags; PORT_UNLOCK(pi); end_synchronized_op(sc, can_sleep ? 
0 : LOCK_HELD); break; case SIOCADDMULTI: case SIOCDELMULTI: /* these two are called with a mutex held :-( */ rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi"); if (rc) return (rc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_MCADDRS); end_synchronized_op(sc, LOCK_HELD); break; case SIOCSIFCAP: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); if (rc) return (rc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO4; if_printf(ifp, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & ifp->if_capenable && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO6; if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; /* * Note that we leave CSUM_TSO alone (it is always set). The * kernel takes both IFCAP_TSOx and CSUM_TSO into account before * sending a TSO request our way, so it's sufficient to toggle * IFCAP_TSOx only. */ if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & ifp->if_capenable) && !(IFCAP_TXCSUM & ifp->if_capenable)) { if_printf(ifp, "enable txcsum first.\n"); rc = EAGAIN; goto fail; } ifp->if_capenable ^= IFCAP_TSO4; } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & ifp->if_capenable) && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { if_printf(ifp, "enable txcsum6 first.\n"); rc = EAGAIN; goto fail; } ifp->if_capenable ^= IFCAP_TSO6; } if (mask & IFCAP_LRO) { #if defined(INET) || defined(INET6) int i; struct sge_rxq *rxq; ifp->if_capenable ^= IFCAP_LRO; for_each_rxq(vi, i, rxq) { if (ifp->if_capenable & IFCAP_LRO) rxq->iq.flags |= IQ_LRO_ENABLED; else rxq->iq.flags &= ~IQ_LRO_ENABLED; } #endif } #ifdef TCP_OFFLOAD if (mask & IFCAP_TOE) { int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE; rc = toe_capability(vi, enable); if (rc != 0) goto fail; ifp->if_capenable ^= mask; } #endif if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (ifp->if_drv_flags & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_VLANEX); } if (mask & IFCAP_VLAN_MTU) { ifp->if_capenable ^= IFCAP_VLAN_MTU; /* Need to find out how to disable auto-mtu-inflation */ } if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_VLAN_HWCSUM) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; #ifdef VLAN_CAPABILITIES VLAN_CAPABILITIES(ifp); #endif fail: end_synchronized_op(sc, 0); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: ifmedia_ioctl(ifp, ifr, &pi->media, cmd); break; case SIOCGI2C: { struct ifi2creq i2c; rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (rc != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { rc = EPERM; break; } if (i2c.len > sizeof(i2c.data)) { rc = EINVAL; break; } rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); if (rc) return (rc); rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr, i2c.offset, i2c.len, &i2c.data[0]); end_synchronized_op(sc, 0); if (rc == 0) rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } default: rc = ether_ioctl(ifp, cmd, data); } return (rc); } static int cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) { struct 
vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct sge_txq *txq; void *items[1]; int rc; M_ASSERTPKTHDR(m); MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ if (__predict_false(pi->link_cfg.link_ok == false)) { m_freem(m); return (ENETDOWN); } rc = parse_pkt(sc, &m); if (__predict_false(rc != 0)) { MPASS(m == NULL); /* was freed already */ atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ return (rc); } /* Select a txq. */ txq = &sc->sge.txq[vi->first_txq]; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + vi->rsrv_noflowq); items[0] = m; rc = mp_ring_enqueue(txq->r, items, 1, 4096); if (__predict_false(rc != 0)) m_freem(m); return (rc); } static void cxgbe_qflush(struct ifnet *ifp) { struct vi_info *vi = ifp->if_softc; struct sge_txq *txq; int i; /* queues do not exist if !VI_INIT_DONE. */ if (vi->flags & VI_INIT_DONE) { for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_QFLUSH; TXQ_UNLOCK(txq); while (!mp_ring_is_idle(txq->r)) { mp_ring_check_drainage(txq->r, 0); pause("qflush", 1); } TXQ_LOCK(txq); txq->eq.flags &= ~EQ_QFLUSH; TXQ_UNLOCK(txq); } } if_qflush(ifp); } static uint64_t vi_get_counter(struct ifnet *ifp, ift_counter c) { struct vi_info *vi = ifp->if_softc; struct fw_vi_stats_vf *s = &vi->stats; vi_refresh_stats(vi->pi->adapter, vi); switch (c) { case IFCOUNTER_IPACKETS: return (s->rx_bcast_frames + s->rx_mcast_frames + s->rx_ucast_frames); case IFCOUNTER_IERRORS: return (s->rx_err_frames); case IFCOUNTER_OPACKETS: return (s->tx_bcast_frames + s->tx_mcast_frames + s->tx_ucast_frames + s->tx_offload_frames); case IFCOUNTER_OERRORS: return (s->tx_drop_frames); case IFCOUNTER_IBYTES: return (s->rx_bcast_bytes + s->rx_mcast_bytes + s->rx_ucast_bytes); case IFCOUNTER_OBYTES: return (s->tx_bcast_bytes + s->tx_mcast_bytes + s->tx_ucast_bytes + s->tx_offload_bytes); case IFCOUNTER_IMCASTS: return (s->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (s->tx_mcast_frames); case IFCOUNTER_OQDROPS: { uint64_t drops; drops = 0; if (vi->flags & VI_INIT_DONE) { int i; struct sge_txq *txq; for_each_txq(vi, i, txq) drops += counter_u64_fetch(txq->r->drops); } return (drops); } default: return (if_get_counter_default(ifp, c)); } } uint64_t cxgbe_get_counter(struct ifnet *ifp, ift_counter c) { struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct port_stats *s = &pi->stats; if (pi->nvi > 1 || sc->flags & IS_VF) return (vi_get_counter(ifp, c)); cxgbe_refresh_stats(sc, pi); switch (c) { case IFCOUNTER_IPACKETS: return (s->rx_frames); case IFCOUNTER_IERRORS: return (s->rx_jabber + s->rx_runt + s->rx_too_long + s->rx_fcs_err + s->rx_len_err); case IFCOUNTER_OPACKETS: return (s->tx_frames); case IFCOUNTER_OERRORS: return (s->tx_error_frames); case IFCOUNTER_IBYTES: return (s->rx_octets); case IFCOUNTER_OBYTES: return (s->tx_octets); case IFCOUNTER_IMCASTS: return (s->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (s->tx_mcast_frames); case IFCOUNTER_IQDROPS: return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + s->rx_trunc3 + pi->tnl_cong_drops); case IFCOUNTER_OQDROPS: { uint64_t drops; drops = s->tx_drop; if (vi->flags & VI_INIT_DONE) { int i; struct sge_txq *txq; for_each_txq(vi, i, txq) drops += counter_u64_fetch(txq->r->drops); } return (drops); } default: return (if_get_counter_default(ifp, c)); } } /* * The kernel picks a media from 
the list we had provided, but we still validate * the request. */ int cxgbe_media_change(struct ifnet *ifp) { struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct ifmedia *ifm = &pi->media; struct link_config *lc = &pi->link_cfg; struct adapter *sc = pi->adapter; int rc; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec"); if (rc != 0) return (rc); PORT_LOCK(pi); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { /* ifconfig .. media autoselect */ if (!(lc->supported & FW_PORT_CAP32_ANEG)) { rc = ENOTSUP; /* AN not supported by transceiver */ goto done; } lc->requested_aneg = AUTONEG_ENABLE; lc->requested_speed = 0; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->requested_aneg = AUTONEG_DISABLE; lc->requested_speed = ifmedia_baudrate(ifm->ifm_media) / 1000000; lc->requested_fc = 0; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) lc->requested_fc |= PAUSE_RX; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) lc->requested_fc |= PAUSE_TX; } if (pi->up_vis > 0) { fixup_link_config(pi); rc = apply_link_config(pi); } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); return (rc); } /* * Base media word (without ETHER, pause, link active, etc.) for the port at the * given speed. */ static int port_mword(struct port_info *pi, uint32_t speed) { MPASS(speed & M_FW_PORT_CAP32_SPEED); MPASS(powerof2(speed)); switch(pi->port_type) { case FW_PORT_TYPE_BT_SGMII: case FW_PORT_TYPE_BT_XFI: case FW_PORT_TYPE_BT_XAUI: /* BaseT */ switch (speed) { case FW_PORT_CAP32_SPEED_100M: return (IFM_100_T); case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_T); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_T); } break; case FW_PORT_TYPE_KX4: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_KX4); break; case FW_PORT_TYPE_CX4: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_CX4); break; case FW_PORT_TYPE_KX: if (speed == FW_PORT_CAP32_SPEED_1G) return (IFM_1000_KX); break; case FW_PORT_TYPE_KR: case FW_PORT_TYPE_BP_AP: case FW_PORT_TYPE_BP4_AP: case FW_PORT_TYPE_BP40_BA: case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_KR_SFP28: case FW_PORT_TYPE_KR_XLAUI: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_KX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_KR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_KR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_KR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_KR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_KR4); } break; case FW_PORT_TYPE_FIBER_XFI: case FW_PORT_TYPE_FIBER_XAUI: case FW_PORT_TYPE_SFP: case FW_PORT_TYPE_QSFP_10G: case FW_PORT_TYPE_QSA: case FW_PORT_TYPE_QSFP: case FW_PORT_TYPE_CR4_QSFP: case FW_PORT_TYPE_CR_QSFP: case FW_PORT_TYPE_CR2_QSFP: case FW_PORT_TYPE_SFP28: /* Pluggable transceiver */ switch (pi->mod_type) { case FW_PORT_MOD_TYPE_LR: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_LX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_LR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_LR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_LR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_LR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_LR4); } break; case FW_PORT_MOD_TYPE_SR: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_SX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_SR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_SR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_SR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_SR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_SR4); } break; case FW_PORT_MOD_TYPE_ER: if (speed == FW_PORT_CAP32_SPEED_10G) return
(IFM_10G_ER); break; case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_CX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_TWINAX); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_CR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_CR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_CR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_CR4); } break; case FW_PORT_MOD_TYPE_LRM: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_LRM); break; case FW_PORT_MOD_TYPE_NA: MPASS(0); /* Not pluggable? */ /* fall through */ case FW_PORT_MOD_TYPE_ERROR: case FW_PORT_MOD_TYPE_UNKNOWN: case FW_PORT_MOD_TYPE_NOTSUPPORTED: break; case FW_PORT_MOD_TYPE_NONE: return (IFM_NONE); } break; case FW_PORT_TYPE_NONE: return (IFM_NONE); } return (IFM_UNKNOWN); } void cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0) return; PORT_LOCK(pi); if (pi->up_vis == 0) { /* * If all the interfaces are administratively down the firmware * does not report transceiver changes. Refresh port info here * so that ifconfig displays accurate ifmedia at all times. * This is the only reason we have a synchronized op in this * function. Just PORT_LOCK would have been enough otherwise. */ t4_update_port_info(pi); build_medialist(pi); } /* ifm_status */ ifmr->ifm_status = IFM_AVALID; if (lc->link_ok == false) goto done; ifmr->ifm_status |= IFM_ACTIVE; /* ifm_active */ ifmr->ifm_active = IFM_ETHER | IFM_FDX; ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); if (lc->fc & PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if (lc->fc & PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed)); done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } static int vcxgbe_probe(device_t dev) { char buf[128]; struct vi_info *vi = device_get_softc(dev); snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, vi - vi->pi->vi); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } static int alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi) { int func, index, rc; uint32_t param, val; ASSERT_SYNCHRONIZED_OP(sc); index = vi - pi->vi; MPASS(index > 0); /* This function deals with _extra_ VIs only */ KASSERT(index < nitems(vi_mac_funcs), ("%s: VI %s doesn't have a MAC func", __func__, device_get_nameunit(vi->dev))); func = vi_mac_funcs[index]; rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, - vi->hw_addr, &vi->rss_size, func, 0); + vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0); if (rc < 0) { device_printf(vi->dev, "failed to allocate virtual interface %d " "for port %d: %d\n", index, pi->port_id, -rc); return (-rc); } vi->viid = rc; - if (chip_id(sc) <= CHELSIO_T5) - vi->smt_idx = (rc & 0x7f) << 1; - else - vi->smt_idx = (rc & 0x7f); if (vi->rss_size == 1) { /* * This VI didn't get a slice of the RSS table. Reduce the * number of VIs being created (hw.cxgbe.num_vis) or modify the * configuration file (nvi, rssnvi for this PF) if this is a * problem.
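* rss_base is set to 0xffff below to mark the missing slice.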
*/ device_printf(vi->dev, "RSS table not available.\n"); vi->rss_base = 0xffff; return (0); } param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | V_FW_PARAMS_PARAM_YZ(vi->viid); rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc) vi->rss_base = 0xffff; else { MPASS((val >> 16) == vi->rss_size); vi->rss_base = val & 0xffff; } return (0); } static int vcxgbe_attach(device_t dev) { struct vi_info *vi; struct port_info *pi; struct adapter *sc; int rc; vi = device_get_softc(dev); pi = vi->pi; sc = pi->adapter; rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via"); if (rc) return (rc); rc = alloc_extra_vi(sc, pi, vi); end_synchronized_op(sc, 0); if (rc) return (rc); rc = cxgbe_vi_attach(dev, vi); if (rc) { t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); return (rc); } return (0); } static int vcxgbe_detach(device_t dev) { struct vi_info *vi; struct adapter *sc; vi = device_get_softc(dev); sc = vi->pi->adapter; doom_vi(sc, vi); cxgbe_vi_detach(vi); t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); end_synchronized_op(sc, 0); return (0); } static struct callout fatal_callout; static void delayed_panic(void *arg) { struct adapter *sc = arg; panic("%s: panic on fatal error", device_get_nameunit(sc->dev)); } void t4_fatal_err(struct adapter *sc, bool fw_error) { t4_shutdown_adapter(sc); log(LOG_ALERT, "%s: encountered fatal error, adapter stopped.\n", device_get_nameunit(sc->dev)); if (fw_error) { ASSERT_SYNCHRONIZED_OP(sc); sc->flags |= ADAP_ERR; } else { ADAPTER_LOCK(sc); sc->flags |= ADAP_ERR; ADAPTER_UNLOCK(sc); } if (t4_panic_on_fatal_err) { log(LOG_ALERT, "%s: panic on fatal error after 30s", device_get_nameunit(sc->dev)); callout_reset(&fatal_callout, hz * 30, delayed_panic, sc); } } void t4_add_adapter(struct adapter *sc) { sx_xlock(&t4_list_lock); SLIST_INSERT_HEAD(&t4_list, sc, link); sx_xunlock(&t4_list_lock); } int t4_map_bars_0_and_4(struct adapter *sc) { sc->regs_rid = PCIR_BAR(0); sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE); if (sc->regs_res == NULL) { device_printf(sc->dev, "cannot map registers.\n"); return (ENXIO); } sc->bt = rman_get_bustag(sc->regs_res); sc->bh = rman_get_bushandle(sc->regs_res); sc->mmio_len = rman_get_size(sc->regs_res); setbit(&sc->doorbells, DOORBELL_KDB); sc->msix_rid = PCIR_BAR(4); sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->msix_rid, RF_ACTIVE); if (sc->msix_res == NULL) { device_printf(sc->dev, "cannot map MSI-X BAR.\n"); return (ENXIO); } return (0); } int t4_map_bar_2(struct adapter *sc) { /* * T4: only iWARP driver uses the userspace doorbells. There is no need * to map it if RDMA is disabled. */ if (is_t4(sc) && sc->rdmacaps == 0) return (0); sc->udbs_rid = PCIR_BAR(2); sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->udbs_rid, RF_ACTIVE); if (sc->udbs_res == NULL) { device_printf(sc->dev, "cannot map doorbell BAR.\n"); return (ENXIO); } sc->udbs_base = rman_get_virtual(sc->udbs_res); if (chip_id(sc) >= CHELSIO_T5) { setbit(&sc->doorbells, DOORBELL_UDB); #if defined(__i386__) || defined(__amd64__) if (t5_write_combine) { int rc, mode; /* * Enable write combining on BAR2. This is the * userspace doorbell BAR and is split into 128B * (UDBS_SEG_SIZE) doorbell regions, each associated * with an egress queue. The first 64B has the doorbell * and the second 64B can be used to submit a tx work * request with an implicit doorbell. 
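* An illustrative sketch (hypothetical, not verbatim driver code; udb
* is assumed to point at a queue's 128B segment and UDBS_WR_OFFSET
* (64) is assumed from this driver's headers): instead of a separate
* doorbell write, a complete 64B work request can be copied into the
* second half of the segment,
*
*	memcpy(udb + UDBS_WR_OFFSET, wr, EQ_ESIZE);	// 64B WR, doorbell implied
*
* and the write-combining buffer then flushed with a store barrier.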
*/ rc = pmap_change_attr((vm_offset_t)sc->udbs_base, rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); if (rc == 0) { clrbit(&sc->doorbells, DOORBELL_UDB); setbit(&sc->doorbells, DOORBELL_WCWR); setbit(&sc->doorbells, DOORBELL_UDBWC); } else { device_printf(sc->dev, "couldn't enable write combining: %d\n", rc); } mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); t4_write_reg(sc, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) | mode); } #endif } sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0; return (0); } struct memwin_init { uint32_t base; uint32_t aperture; }; static const struct memwin_init t4_memwin[NUM_MEMWIN] = { { MEMWIN0_BASE, MEMWIN0_APERTURE }, { MEMWIN1_BASE, MEMWIN1_APERTURE }, { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } }; static const struct memwin_init t5_memwin[NUM_MEMWIN] = { { MEMWIN0_BASE, MEMWIN0_APERTURE }, { MEMWIN1_BASE, MEMWIN1_APERTURE }, { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, }; static void setup_memwin(struct adapter *sc) { const struct memwin_init *mw_init; struct memwin *mw; int i; uint32_t bar0; if (is_t4(sc)) { /* * Read low 32b of bar0 indirectly via the hardware backdoor * mechanism. Works from within PCI passthrough environments * too, where rman_get_start() can return a different value. We * need to program the T4 memory window decoders with the actual * addresses that will be coming across the PCIe link. */ bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; mw_init = &t4_memwin[0]; } else { /* T5+ use the relative offset inside the PCIe BAR */ bar0 = 0; mw_init = &t5_memwin[0]; } for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { rw_init(&mw->mw_lock, "memory window access"); mw->mw_base = mw_init->base; mw->mw_aperture = mw_init->aperture; mw->mw_curpos = 0; t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), (mw->mw_base + bar0) | V_BIR(0) | V_WINDOW(ilog2(mw->mw_aperture) - 10)); rw_wlock(&mw->mw_lock); position_memwin(sc, i, 0); rw_wunlock(&mw->mw_lock); } /* flush */ t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); } /* * Positions the memory window at the given address in the card's address space. * There are some alignment requirements and the actual position may be at an * address prior to the requested address. mw->mw_curpos always has the actual * position of the window. 
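* For example, on T5+ (128B alignment) a request for address 0x12345
* leaves the window positioned at 0x12300 with mw_curpos = 0x12300;
* rw_via_memwin() below adds back the 0x45 difference when computing
* the register offset to access.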
*/ static void position_memwin(struct adapter *sc, int idx, uint32_t addr) { struct memwin *mw; uint32_t pf; uint32_t reg; MPASS(idx >= 0 && idx < NUM_MEMWIN); mw = &sc->memwin[idx]; rw_assert(&mw->mw_lock, RA_WLOCKED); if (is_t4(sc)) { pf = 0; mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ } else { pf = V_PFNUM(sc->pf); mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ } reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); t4_write_reg(sc, reg, mw->mw_curpos | pf); t4_read_reg(sc, reg); /* flush */ } int rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, int len, int rw) { struct memwin *mw; uint32_t mw_end, v; MPASS(idx >= 0 && idx < NUM_MEMWIN); /* Memory can only be accessed in naturally aligned 4 byte units */ if (addr & 3 || len & 3 || len <= 0) return (EINVAL); mw = &sc->memwin[idx]; while (len > 0) { rw_rlock(&mw->mw_lock); mw_end = mw->mw_curpos + mw->mw_aperture; if (addr >= mw_end || addr < mw->mw_curpos) { /* Will need to reposition the window */ if (!rw_try_upgrade(&mw->mw_lock)) { rw_runlock(&mw->mw_lock); rw_wlock(&mw->mw_lock); } rw_assert(&mw->mw_lock, RA_WLOCKED); position_memwin(sc, idx, addr); rw_downgrade(&mw->mw_lock); mw_end = mw->mw_curpos + mw->mw_aperture; } rw_assert(&mw->mw_lock, RA_RLOCKED); while (addr < mw_end && len > 0) { if (rw == 0) { v = t4_read_reg(sc, mw->mw_base + addr - mw->mw_curpos); *val++ = le32toh(v); } else { v = *val++; t4_write_reg(sc, mw->mw_base + addr - mw->mw_curpos, htole32(v)); } addr += 4; len -= 4; } rw_runlock(&mw->mw_lock); } return (0); } int alloc_atid_tab(struct tid_info *t, int flags) { int i; MPASS(t->natids > 0); MPASS(t->atid_tab == NULL); t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE, M_ZERO | flags); if (t->atid_tab == NULL) return (ENOMEM); mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF); t->afree = t->atid_tab; t->atids_in_use = 0; for (i = 1; i < t->natids; i++) t->atid_tab[i - 1].next = &t->atid_tab[i]; t->atid_tab[t->natids - 1].next = NULL; return (0); } void free_atid_tab(struct tid_info *t) { KASSERT(t->atids_in_use == 0, ("%s: %d atids still in use.", __func__, t->atids_in_use)); if (mtx_initialized(&t->atid_lock)) mtx_destroy(&t->atid_lock); free(t->atid_tab, M_CXGBE); t->atid_tab = NULL; } int alloc_atid(struct adapter *sc, void *ctx) { struct tid_info *t = &sc->tids; int atid = -1; mtx_lock(&t->atid_lock); if (t->afree) { union aopen_entry *p = t->afree; atid = p - t->atid_tab; MPASS(atid <= M_TID_TID); t->afree = p->next; p->data = ctx; t->atids_in_use++; } mtx_unlock(&t->atid_lock); return (atid); } void * lookup_atid(struct adapter *sc, int atid) { struct tid_info *t = &sc->tids; return (t->atid_tab[atid].data); } void free_atid(struct adapter *sc, int atid) { struct tid_info *t = &sc->tids; union aopen_entry *p = &t->atid_tab[atid]; mtx_lock(&t->atid_lock); p->next = t->afree; t->afree = p; t->atids_in_use--; mtx_unlock(&t->atid_lock); } static void queue_tid_release(struct adapter *sc, int tid) { CXGBE_UNIMPLEMENTED("deferred tid release"); } void release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq) { struct wrqe *wr; struct cpl_tid_release *req; wr = alloc_wrqe(sizeof(*req), ctrlq); if (wr == NULL) { queue_tid_release(sc, tid); /* defer */ return; } req = wrtod(wr); INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid); t4_wrq_tx(sc, wr); } static int t4_range_cmp(const void *a, const void *b) { return ((const struct t4_range *)a)->start - ((const struct t4_range *)b)->start; } /* * Verify that the memory range specified by the 
addr/len pair is valid within * the card's address space. */ static int validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len) { struct t4_range mem_ranges[4], *r, *next; uint32_t em, addr_len; int i, n, remaining; /* Memory can only be accessed in naturally aligned 4 byte units */ if (addr & 3 || len & 3 || len == 0) return (EINVAL); /* Enabled memories */ em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); r = &mem_ranges[0]; n = 0; bzero(r, sizeof(mem_ranges)); if (em & F_EDRAM0_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); r->size = G_EDRAM0_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EDRAM0_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (em & F_EDRAM1_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); r->size = G_EDRAM1_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EDRAM1_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (em & F_EXT_MEM_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); r->size = G_EXT_MEM_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EXT_MEM_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); r->size = G_EXT_MEM1_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EXT_MEM1_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } MPASS(n <= nitems(mem_ranges)); if (n > 1) { /* Sort and merge the ranges. */ qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); /* Start from index 0 and examine the next n - 1 entries. */ r = &mem_ranges[0]; for (remaining = n - 1; remaining > 0; remaining--, r++) { MPASS(r->size > 0); /* r is a valid entry. */ next = r + 1; MPASS(next->size > 0); /* and so is the next one. */ while (r->start + r->size >= next->start) { /* Merge the next one into the current entry. */ r->size = max(r->start + r->size, next->start + next->size) - r->start; n--; /* One fewer entry in total. */ if (--remaining == 0) goto done; /* short circuit */ next++; } if (next != r + 1) { /* * Some entries were merged into r and next * points to the first valid entry that couldn't * be merged. */ MPASS(next->size > 0); /* must be valid */ memcpy(r + 1, next, remaining * sizeof(*r)); #ifdef INVARIANTS /* * This is so that the size assertions in the * next iteration of the loop do the right * thing for entries that were pulled up and are * no longer valid. */ MPASS(n < nitems(mem_ranges)); bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * sizeof(struct t4_range)); #endif } } done: /* Done merging the ranges. */ MPASS(n > 0); r = &mem_ranges[0]; for (i = 0; i < n; i++, r++) { if (addr >= r->start && addr + len <= r->start + r->size) return (0); } } return (EFAULT); } static int fwmtype_to_hwmtype(int mtype) { switch (mtype) { case FW_MEMTYPE_EDC0: return (MEM_EDC0); case FW_MEMTYPE_EDC1: return (MEM_EDC1); case FW_MEMTYPE_EXTMEM: return (MEM_MC0); case FW_MEMTYPE_EXTMEM1: return (MEM_MC1); default: panic("%s: cannot translate fw mtype %d.", __func__, mtype); } } /* * Verify that the memory range specified by the memtype/offset/len pair is * valid and lies entirely within the memtype specified. The global address of * the start of the range is returned in addr.
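* For example, FW_MEMTYPE_EDC1 with off 0x100 resolves to the EDC1
* base (read from A_MA_EDRAM1_BAR) plus 0x100, and the resulting
* global range is then checked with validate_mem_range().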
*/ static int validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len, uint32_t *addr) { uint32_t em, addr_len, maddr; /* Memory can only be accessed in naturally aligned 4 byte units */ if (off & 3 || len & 3 || len == 0) return (EINVAL); em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); switch (fwmtype_to_hwmtype(mtype)) { case MEM_EDC0: if (!(em & F_EDRAM0_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); maddr = G_EDRAM0_BASE(addr_len) << 20; break; case MEM_EDC1: if (!(em & F_EDRAM1_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); maddr = G_EDRAM1_BASE(addr_len) << 20; break; case MEM_MC: if (!(em & F_EXT_MEM_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); maddr = G_EXT_MEM_BASE(addr_len) << 20; break; case MEM_MC1: if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); maddr = G_EXT_MEM1_BASE(addr_len) << 20; break; default: return (EINVAL); } *addr = maddr + off; /* global address */ return (validate_mem_range(sc, *addr, len)); } static int fixup_devlog_params(struct adapter *sc) { struct devlog_params *dparams = &sc->params.devlog; int rc; rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, dparams->size, &dparams->addr); return (rc); } static void update_nirq(struct intrs_and_queues *iaq, int nports) { int extra = T4_EXTRA_INTR; iaq->nirq = extra; iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq); iaq->nirq += nports * (iaq->num_vis - 1) * max(iaq->nrxq_vi, iaq->nnmrxq_vi); iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi; } /* * Adjust requirements to fit the number of interrupts available. */ static void calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype, int navail) { int old_nirq; const int nports = sc->params.nports; MPASS(nports > 0); MPASS(navail > 0); bzero(iaq, sizeof(*iaq)); iaq->intr_type = itype; iaq->num_vis = t4_num_vis; iaq->ntxq = t4_ntxq; iaq->ntxq_vi = t4_ntxq_vi; iaq->nrxq = t4_nrxq; iaq->nrxq_vi = t4_nrxq_vi; #ifdef TCP_OFFLOAD if (is_offload(sc)) { iaq->nofldtxq = t4_nofldtxq; iaq->nofldtxq_vi = t4_nofldtxq_vi; iaq->nofldrxq = t4_nofldrxq; iaq->nofldrxq_vi = t4_nofldrxq_vi; } #endif #ifdef DEV_NETMAP iaq->nnmtxq_vi = t4_nnmtxq_vi; iaq->nnmrxq_vi = t4_nnmrxq_vi; #endif update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { /* * This is the normal case -- there are enough interrupts for * everything. */ goto done; } /* * If extra VIs have been configured try reducing their count and see if * that works. */ while (iaq->num_vis > 1) { iaq->num_vis--; update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { device_printf(sc->dev, "virtual interfaces per port " "reduced to %d from %d. nrxq=%u, nofldrxq=%u, " "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. " "itype %d, navail %u, nirq %d.\n", iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, itype, navail, iaq->nirq); goto done; } } /* * Extra VIs will not be created. Log a message if they were requested. */ MPASS(iaq->num_vis == 1); iaq->ntxq_vi = iaq->nrxq_vi = 0; iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; if (iaq->num_vis != t4_num_vis) { device_printf(sc->dev, "extra virtual interfaces disabled. " "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, " "nnmrxq_vi=%u. 
itype %d, navail %u, nirq %d.\n", iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, itype, navail, iaq->nirq); } /* * Keep reducing the number of NIC rx queues to the next lower power of * 2 (for even RSS distribution) and halving the TOE rx queues and see * if that works. */ do { if (iaq->nrxq > 1) { do { iaq->nrxq--; } while (!powerof2(iaq->nrxq)); } if (iaq->nofldrxq > 1) iaq->nofldrxq >>= 1; old_nirq = iaq->nirq; update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { device_printf(sc->dev, "running with reduced number of " "rx queues because of shortage of interrupts. " "nrxq=%u, nofldrxq=%u. " "itype %d, navail %u, nirq %d.\n", iaq->nrxq, iaq->nofldrxq, itype, navail, iaq->nirq); goto done; } } while (old_nirq != iaq->nirq); /* One interrupt for everything. Ugh. */ device_printf(sc->dev, "running with minimal number of queues. " "itype %d, navail %u.\n", itype, navail); iaq->nirq = 1; MPASS(iaq->nrxq == 1); iaq->ntxq = 1; if (iaq->nofldrxq > 1) iaq->nofldtxq = 1; done: MPASS(iaq->num_vis > 0); if (iaq->num_vis > 1) { MPASS(iaq->nrxq_vi > 0); MPASS(iaq->ntxq_vi > 0); } MPASS(iaq->nirq > 0); MPASS(iaq->nrxq > 0); MPASS(iaq->ntxq > 0); if (itype == INTR_MSI) { MPASS(powerof2(iaq->nirq)); } } static int cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq) { int rc, itype, navail, nalloc; for (itype = INTR_MSIX; itype; itype >>= 1) { if ((itype & t4_intr_types) == 0) continue; /* not allowed */ if (itype == INTR_MSIX) navail = pci_msix_count(sc->dev); else if (itype == INTR_MSI) navail = pci_msi_count(sc->dev); else navail = 1; restart: if (navail == 0) continue; calculate_iaq(sc, iaq, itype, navail); nalloc = iaq->nirq; rc = 0; if (itype == INTR_MSIX) rc = pci_alloc_msix(sc->dev, &nalloc); else if (itype == INTR_MSI) rc = pci_alloc_msi(sc->dev, &nalloc); if (rc == 0 && nalloc > 0) { if (nalloc == iaq->nirq) return (0); /* * Didn't get the number requested. Use whatever number * the kernel is willing to allocate. */ device_printf(sc->dev, "fewer vectors than requested, " "type=%d, req=%d, rcvd=%d; will downshift req.\n", itype, iaq->nirq, nalloc); pci_release_msi(sc->dev); navail = nalloc; goto restart; } device_printf(sc->dev, "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", itype, rc, iaq->nirq, nalloc); } device_printf(sc->dev, "failed to find a usable interrupt type. " "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, pci_msix_count(sc->dev), pci_msi_count(sc->dev)); return (ENXIO); } #define FW_VERSION(chip) ( \ V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) /* Just enough of fw_hdr to cover all version info. */ struct fw_h { __u8 ver; __u8 chip; __be16 len512; __be32 fw_ver; __be32 tp_microcode_ver; __u8 intfver_nic; __u8 intfver_vnic; __u8 intfver_ofld; __u8 intfver_ri; __u8 intfver_iscsipdu; __u8 intfver_iscsi; __u8 intfver_fcoepdu; __u8 intfver_fcoe; }; /* Spot check a couple of fields. 
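* The CTASSERTs below keep struct fw_h layout-compatible with the
* full struct fw_hdr; the firmware version checks rely on that.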
*/ CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver)); CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic)); CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe)); struct fw_info { uint8_t chip; char *kld_name; char *fw_mod_name; struct fw_h fw_h; } fw_info[] = { { .chip = CHELSIO_T4, .kld_name = "t4fw_cfg", .fw_mod_name = "t4fw", .fw_h = { .chip = FW_HDR_CHIP_T4, .fw_ver = htobe32(FW_VERSION(T4)), .intfver_nic = FW_INTFVER(T4, NIC), .intfver_vnic = FW_INTFVER(T4, VNIC), .intfver_ofld = FW_INTFVER(T4, OFLD), .intfver_ri = FW_INTFVER(T4, RI), .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T4, ISCSI), .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), .intfver_fcoe = FW_INTFVER(T4, FCOE), }, }, { .chip = CHELSIO_T5, .kld_name = "t5fw_cfg", .fw_mod_name = "t5fw", .fw_h = { .chip = FW_HDR_CHIP_T5, .fw_ver = htobe32(FW_VERSION(T5)), .intfver_nic = FW_INTFVER(T5, NIC), .intfver_vnic = FW_INTFVER(T5, VNIC), .intfver_ofld = FW_INTFVER(T5, OFLD), .intfver_ri = FW_INTFVER(T5, RI), .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T5, ISCSI), .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), .intfver_fcoe = FW_INTFVER(T5, FCOE), }, }, { .chip = CHELSIO_T6, .kld_name = "t6fw_cfg", .fw_mod_name = "t6fw", .fw_h = { .chip = FW_HDR_CHIP_T6, .fw_ver = htobe32(FW_VERSION(T6)), .intfver_nic = FW_INTFVER(T6, NIC), .intfver_vnic = FW_INTFVER(T6, VNIC), .intfver_ofld = FW_INTFVER(T6, OFLD), .intfver_ri = FW_INTFVER(T6, RI), .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T6, ISCSI), .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), .intfver_fcoe = FW_INTFVER(T6, FCOE), }, } }; static struct fw_info * find_fw_info(int chip) { int i; for (i = 0; i < nitems(fw_info); i++) { if (fw_info[i].chip == chip) return (&fw_info[i]); } return (NULL); } /* * Is the given firmware API compatible with the one the driver was compiled * with? */ static int fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2) { /* short circuit if it's the exact same firmware version */ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) return (1); /* * XXX: Is this too conservative? Perhaps I should limit this to the * features that are supported in the driver. */ #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) return (1); #undef SAME_INTF return (0); } static int load_fw_module(struct adapter *sc, const struct firmware **dcfg, const struct firmware **fw) { struct fw_info *fw_info; *dcfg = NULL; if (fw != NULL) *fw = NULL; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); return (EINVAL); } *dcfg = firmware_get(fw_info->kld_name); if (*dcfg != NULL) { if (fw != NULL) *fw = firmware_get(fw_info->fw_mod_name); return (0); } return (ENOENT); } static void unload_fw_module(struct adapter *sc, const struct firmware *dcfg, const struct firmware *fw) { if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); if (dcfg != NULL) firmware_put(dcfg, FIRMWARE_UNLOAD); } /* * Return values: * 0 means no firmware install attempted. * ERESTART means a firmware install was attempted and was successful. * +ve errno means a firmware install was attempted but failed. 
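* ERESTART prompts the caller to re-read the firmware header off the
* card (it has just changed) and redo all the checks.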
*/ static int install_kld_firmware(struct adapter *sc, struct fw_h *card_fw, const struct fw_h *drv_fw, const char *reason, int *already) { const struct firmware *cfg, *fw; const uint32_t c = be32toh(card_fw->fw_ver); uint32_t d, k; int rc, fw_install; struct fw_h bundled_fw; bool load_attempted; cfg = fw = NULL; load_attempted = false; fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install; memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw)); if (t4_fw_install < 0) { rc = load_fw_module(sc, &cfg, &fw); if (rc != 0 || fw == NULL) { device_printf(sc->dev, "failed to load firmware module: %d. cfg %p, fw %p;" " will use compiled-in firmware version for" "hw.cxgbe.fw_install checks.\n", rc, cfg, fw); } else { memcpy(&bundled_fw, fw->data, sizeof(bundled_fw)); } load_attempted = true; } d = be32toh(bundled_fw.fw_ver); if (reason != NULL) goto install; if ((sc->flags & FW_OK) == 0) { if (c == 0xffffffff) { reason = "missing"; goto install; } rc = 0; goto done; } if (!fw_compatible(card_fw, &bundled_fw)) { reason = "incompatible or unusable"; goto install; } if (d > c) { reason = "older than the version bundled with this driver"; goto install; } if (fw_install == 2 && d != c) { reason = "different than the version bundled with this driver"; goto install; } /* No reason to do anything to the firmware already on the card. */ rc = 0; goto done; install: rc = 0; if ((*already)++) goto done; if (fw_install == 0) { device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "but the driver is prohibited from installing a firmware " "on the card.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); goto done; } /* * We'll attempt to install a firmware. Load the module first (if it * hasn't been loaded already). */ if (!load_attempted) { rc = load_fw_module(sc, &cfg, &fw); if (rc != 0 || fw == NULL) { device_printf(sc->dev, "failed to load firmware module: %d. cfg %p, fw %p\n", rc, cfg, fw); /* carry on */ } } if (fw == NULL) { device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "but the driver cannot take corrective action because it " "is unable to load the firmware module.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); rc = sc->flags & FW_OK ? 0 : ENOENT; goto done; } k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); if (k != d) { MPASS(t4_fw_install > 0); device_printf(sc->dev, "firmware in KLD (%u.%u.%u.%u) is not what the driver was " "expecting (%u.%u.%u.%u) and will not be used.\n", G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k), G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); rc = sc->flags & FW_OK ? 0 : EINVAL; goto done; } device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "installing firmware %u.%u.%u.%u on card.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); if (rc != 0) { device_printf(sc->dev, "failed to install firmware: %d\n", rc); } else { /* Installed successfully, update the cached header too. 
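 * Note that only the struct fw_h prefix of the freshly written image's
 * header is copied back into the caller's cached copy; that prefix is
 * all the version checks above consult on the next pass.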
*/ rc = ERESTART; memcpy(card_fw, fw->data, sizeof(*card_fw)); } done: unload_fw_module(sc, cfg, fw); return (rc); } /* * Establish contact with the firmware and attempt to become the master driver. * * A firmware will be installed to the card if needed (if the driver is allowed * to do so). */ static int contact_firmware(struct adapter *sc) { int rc, already = 0; enum dev_state state; struct fw_info *fw_info; struct fw_hdr *card_fw; /* fw on the card */ const struct fw_h *drv_fw; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); return (EINVAL); } drv_fw = &fw_info->fw_h; /* Read the header of the firmware on the card */ card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); restart: rc = -t4_get_fw_hdr(sc, card_fw); if (rc != 0) { device_printf(sc->dev, "unable to read firmware header from card's flash: %d\n", rc); goto done; } rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, &already); if (rc == ERESTART) goto restart; if (rc != 0) goto done; rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); if (rc < 0 || state == DEV_STATE_ERR) { rc = -rc; device_printf(sc->dev, "failed to connect to the firmware: %d, %d. " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); #if 0 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, "not responding properly to HELLO", &already) == ERESTART) goto restart; #endif goto done; } MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT); sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */ if (rc == sc->pf) { sc->flags |= MASTER_PF; rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, &already); if (rc == ERESTART) rc = 0; else if (rc != 0) goto done; } else if (state == DEV_STATE_UNINIT) { /* * We didn't get to be the master so we definitely won't be * configuring the chip. It's a bug if someone else hasn't * configured it already. */ device_printf(sc->dev, "couldn't be master(%d), " "device not already initialized either(%d). " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); rc = EPROTO; goto done; } else { /* * Some other PF is the master and has configured the chip. * This is allowed but untested. */ device_printf(sc->dev, "PF%d is master, device state %d. " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc); sc->cfcsum = 0; rc = 0; } done: if (rc != 0 && sc->flags & FW_OK) { t4_fw_bye(sc, sc->mbox); sc->flags &= ~FW_OK; } free(card_fw, M_CXGBE); return (rc); } static int copy_cfg_file_to_card(struct adapter *sc, char *cfg_file, uint32_t mtype, uint32_t moff) { struct fw_info *fw_info; const struct firmware *dcfg, *rcfg = NULL; const uint32_t *cfdata; uint32_t cflen, addr; int rc; load_fw_module(sc, &dcfg, NULL); /* Card specific interpretation of "default". 
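 *
 * Certain cards remap "default" to a more specific profile below (the
 * uwire profile for one T4 variant, the FPGA profile for FPGAs).  Any
 * other named profile is resolved to a firmware(9) module by appending
 * it to the chip's KLD name, so a cfg_file of "uwire" on a T5 card
 * would look for a module named "t5fw_cfg_uwire":
 *
 *	snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file);
 *	rcfg = firmware_get(s);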
*/ if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { if (pci_get_device(sc->dev) == 0x440a) snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF); if (is_fpga(sc)) snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF); } if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { if (dcfg == NULL) { device_printf(sc->dev, "KLD with default config is not available.\n"); rc = ENOENT; goto done; } cfdata = dcfg->data; cflen = dcfg->datasize & ~3; } else { char s[32]; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); rc = EINVAL; goto done; } snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file); rcfg = firmware_get(s); if (rcfg == NULL) { device_printf(sc->dev, "unable to load module \"%s\" for configuration " "profile \"%s\".\n", s, cfg_file); rc = ENOENT; goto done; } cfdata = rcfg->data; cflen = rcfg->datasize & ~3; } if (cflen > FLASH_CFG_MAX_SIZE) { device_printf(sc->dev, "config file too long (%d, max allowed is %d).\n", cflen, FLASH_CFG_MAX_SIZE); rc = EINVAL; goto done; } rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); if (rc != 0) { device_printf(sc->dev, "%s: addr (%d/0x%x) or len %d is not valid: %d.\n", __func__, mtype, moff, cflen, rc); rc = EINVAL; goto done; } write_via_memwin(sc, 2, addr, cfdata, cflen); done: if (rcfg != NULL) firmware_put(rcfg, FIRMWARE_UNLOAD); unload_fw_module(sc, dcfg, NULL); return (rc); } struct caps_allowed { uint16_t nbmcaps; uint16_t linkcaps; uint16_t switchcaps; uint16_t niccaps; uint16_t toecaps; uint16_t rdmacaps; uint16_t cryptocaps; uint16_t iscsicaps; uint16_t fcoecaps; }; #define FW_PARAM_DEV(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) /* * Provide a configuration profile to the firmware and have it initialize the * chip accordingly. This may involve uploading a configuration file to the * card. */ static int apply_cfg_and_initialize(struct adapter *sc, char *cfg_file, const struct caps_allowed *caps_allowed) { int rc; struct fw_caps_config_cmd caps; uint32_t mtype, moff, finicsum, cfcsum, param, val; rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); if (rc != 0) { device_printf(sc->dev, "firmware reset failed: %d.\n", rc); return (rc); } bzero(&caps, sizeof(caps)); caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ); if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) { mtype = 0; moff = 0; caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) { mtype = FW_MEMTYPE_FLASH; moff = t4_flash_cfg_addr(sc); caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); } else { /* * Ask the firmware where it wants us to upload the config file. */ param = FW_PARAM_DEV(CF); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc != 0) { /* No support for config file? Shouldn't happen. 
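 *
 * On success the firmware packs its answer into the reply word: the
 * memory type lands in the PARAM_Y field and the offset, in 64KB
 * units, in PARAM_Z, hence the shift left by 16 when it is unpacked
 * below.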
*/ device_printf(sc->dev, "failed to query config file location: %d.\n", rc); goto done; } mtype = G_FW_PARAMS_PARAM_Y(val); moff = G_FW_PARAMS_PARAM_Z(val) << 16; caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff); if (rc != 0) { device_printf(sc->dev, "failed to upload config file to card: %d.\n", rc); goto done; } } rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); if (rc != 0) { device_printf(sc->dev, "failed to pre-process config file: %d " "(mtype %d, moff 0x%x).\n", rc, mtype, moff); goto done; } finicsum = be32toh(caps.finicsum); cfcsum = be32toh(caps.cfcsum); /* actual */ if (finicsum != cfcsum) { device_printf(sc->dev, "WARNING: config file checksum mismatch: %08x %08x\n", finicsum, cfcsum); } sc->cfcsum = cfcsum; snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file); /* * Let the firmware know what features will (not) be used so it can tune * things accordingly. */ #define LIMIT_CAPS(x) do { \ caps.x##caps &= htobe16(caps_allowed->x##caps); \ } while (0) LIMIT_CAPS(nbm); LIMIT_CAPS(link); LIMIT_CAPS(switch); LIMIT_CAPS(nic); LIMIT_CAPS(toe); LIMIT_CAPS(rdma); LIMIT_CAPS(crypto); LIMIT_CAPS(iscsi); LIMIT_CAPS(fcoe); #undef LIMIT_CAPS if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) { /* * TOE and hashfilters are mutually exclusive. It is a config * file or firmware bug if both are reported as available. Try * to cope with the situation in non-debug builds by disabling * TOE. */ MPASS(caps.toecaps == 0); caps.toecaps = 0; caps.rdmacaps = 0; caps.iscsicaps = 0; } caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); if (rc != 0) { device_printf(sc->dev, "failed to process config file: %d.\n", rc); goto done; } t4_tweak_chip_settings(sc); set_params__pre_init(sc); /* get basic stuff going */ rc = -t4_fw_initialize(sc, sc->mbox); if (rc != 0) { device_printf(sc->dev, "fw_initialize failed: %d.\n", rc); goto done; } done: return (rc); } /* * Partition chip resources for use between various PFs, VFs, etc. */ static int partition_resources(struct adapter *sc) { char cfg_file[sizeof(t4_cfg_file)]; struct caps_allowed caps_allowed; int rc; bool fallback; /* Only the master driver gets to configure the chip resources. */ MPASS(sc->flags & MASTER_PF); #define COPY_CAPS(x) do { \ caps_allowed.x##caps = t4_##x##caps_allowed; \ } while (0) bzero(&caps_allowed, sizeof(caps_allowed)); COPY_CAPS(nbm); COPY_CAPS(link); COPY_CAPS(switch); COPY_CAPS(nic); COPY_CAPS(toe); COPY_CAPS(rdma); COPY_CAPS(crypto); COPY_CAPS(iscsi); COPY_CAPS(fcoe); fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true; snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file); retry: rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed); if (rc != 0 && fallback) { device_printf(sc->dev, "failed (%d) to configure card with \"%s\" profile, " "will fall back to a basic configuration and retry.\n", rc, cfg_file); snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF); bzero(&caps_allowed, sizeof(caps_allowed)); COPY_CAPS(nbm); COPY_CAPS(link); COPY_CAPS(switch); COPY_CAPS(nic); fallback = false; goto retry; } #undef COPY_CAPS return (rc); } /* * Retrieve parameters that are needed (or nice to have) very early. 
*/ static int get_params__pre_init(struct adapter *sc) { int rc; uint32_t param[2], val[2]; t4_get_version_info(sc); snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); param[0] = FW_PARAM_DEV(PORTVEC); param[1] = FW_PARAM_DEV(CCLK); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query parameters (pre_init): %d.\n", rc); return (rc); } sc->params.portvec = val[0]; sc->params.nports = bitcount32(val[0]); sc->params.vpd.cclk = val[1]; /* Read device log parameters. */ rc = -t4_init_devlog_params(sc, 1); if (rc == 0) fixup_devlog_params(sc); else { device_printf(sc->dev, "failed to get devlog parameters: %d.\n", rc); rc = 0; /* devlog isn't critical for device operation */ } return (rc); } /* * Any params that need to be set before FW_INITIALIZE. */ static int set_params__pre_init(struct adapter *sc) { int rc = 0; uint32_t param, val; if (chip_id(sc) >= CHELSIO_T6) { param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT); val = 1; rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); /* firmwares < 1.20.1.0 do not have this param. */ if (rc == FW_EINVAL && sc->params.fw_vers < (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) | V_FW_HDR_FW_VER_MICRO(1) | V_FW_HDR_FW_VER_BUILD(0))) { rc = 0; } if (rc != 0) { device_printf(sc->dev, "failed to enable high priority filters :%d.\n", rc); } } + /* Enable opaque VIIDs with firmwares that support it. */ + param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN); + val = 1; + rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); + if (rc == 0 && val == 1) + sc->params.viid_smt_extn_support = true; + else + sc->params.viid_smt_extn_support = false; + return (rc); } /* * Retrieve various parameters that are of interest to the driver. The device * has been initialized by the firmware at this point. 
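 *
 * Parameters that all supported firmwares implement are fetched in one
 * compound query; optional ones are probed individually so that an
 * older firmware's failure doesn't sink the entire call.  The pattern
 * for an optional parameter is:
 *
 *	param[0] = FW_PARAM_DEV(MPSBGMAP);
 *	val[0] = 0;
 *	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
 *	sc->params.mps_bg_map = (rc == 0) ? val[0] : 0;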
*/ static int get_params__post_init(struct adapter *sc) { int rc; uint32_t param[7], val[7]; struct fw_caps_config_cmd caps; param[0] = FW_PARAM_PFVF(IQFLINT_START); param[1] = FW_PARAM_PFVF(EQ_START); param[2] = FW_PARAM_PFVF(FILTER_START); param[3] = FW_PARAM_PFVF(FILTER_END); param[4] = FW_PARAM_PFVF(L2T_START); param[5] = FW_PARAM_PFVF(L2T_END); param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query parameters (post_init): %d.\n", rc); return (rc); } sc->sge.iq_start = val[0]; sc->sge.eq_start = val[1]; if ((int)val[3] > (int)val[2]) { sc->tids.ftid_base = val[2]; sc->tids.ftid_end = val[3]; sc->tids.nftids = val[3] - val[2] + 1; } sc->vres.l2t.start = val[4]; sc->vres.l2t.size = val[5] - val[4] + 1; KASSERT(sc->vres.l2t.size <= L2T_SIZE, ("%s: L2 table size (%u) larger than expected (%u)", __func__, sc->vres.l2t.size, L2T_SIZE)); sc->params.core_vdd = val[6]; if (chip_id(sc) >= CHELSIO_T6) { sc->tids.tid_base = t4_read_reg(sc, A_LE_DB_ACTIVE_TABLE_START_INDEX); param[0] = FW_PARAM_PFVF(HPFILTER_START); param[1] = FW_PARAM_PFVF(HPFILTER_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query hpfilter parameters: %d.\n", rc); return (rc); } if ((int)val[1] > (int)val[0]) { sc->tids.hpftid_base = val[0]; sc->tids.hpftid_end = val[1]; sc->tids.nhpftids = val[1] - val[0] + 1; /* * These should go off if the layout changes and the * driver needs to catch up. */ MPASS(sc->tids.hpftid_base == 0); MPASS(sc->tids.tid_base == sc->tids.nhpftids); } } /* * MPSBGMAP is queried separately because only recent firmwares support * it as a parameter and we don't want the compound query above to fail * on older firmwares. */ param[0] = FW_PARAM_DEV(MPSBGMAP); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.mps_bg_map = val[0]; else sc->params.mps_bg_map = 0; /* * Determine whether the firmware supports the filter2 work request. * This is queried separately for the same reason as MPSBGMAP above. */ param[0] = FW_PARAM_DEV(FILTER2_WR); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.filter2_wr_support = val[0] != 0; else sc->params.filter2_wr_support = 0; /* * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL. * This is queried separately for the same reason as other params above. 
*/ param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.ulptx_memwrite_dsgl = val[0] != 0; else sc->params.ulptx_memwrite_dsgl = false; /* get capabilites */ bzero(&caps, sizeof(caps)); caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ); caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); if (rc != 0) { device_printf(sc->dev, "failed to get card capabilities: %d.\n", rc); return (rc); } #define READ_CAPS(x) do { \ sc->x = htobe16(caps.x); \ } while (0) READ_CAPS(nbmcaps); READ_CAPS(linkcaps); READ_CAPS(switchcaps); READ_CAPS(niccaps); READ_CAPS(toecaps); READ_CAPS(rdmacaps); READ_CAPS(cryptocaps); READ_CAPS(iscsicaps); READ_CAPS(fcoecaps); if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) { MPASS(chip_id(sc) > CHELSIO_T4); MPASS(sc->toecaps == 0); sc->toecaps = 0; param[0] = FW_PARAM_DEV(NTID); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query HASHFILTER parameters: %d.\n", rc); return (rc); } sc->tids.ntids = val[0]; if (sc->params.fw_vers < (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) | V_FW_HDR_FW_VER_MICRO(5) | V_FW_HDR_FW_VER_BUILD(0))) { MPASS(sc->tids.ntids >= sc->tids.nhpftids); sc->tids.ntids -= sc->tids.nhpftids; } sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); sc->params.hash_filter = 1; } if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { param[0] = FW_PARAM_PFVF(ETHOFLD_START); param[1] = FW_PARAM_PFVF(ETHOFLD_END); param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query NIC parameters: %d.\n", rc); return (rc); } if ((int)val[1] > (int)val[0]) { sc->tids.etid_base = val[0]; sc->tids.etid_end = val[1]; sc->tids.netids = val[1] - val[0] + 1; sc->params.eo_wr_cred = val[2]; sc->params.ethoffload = 1; } } if (sc->toecaps) { /* query offload-related parameters */ param[0] = FW_PARAM_DEV(NTID); param[1] = FW_PARAM_PFVF(SERVER_START); param[2] = FW_PARAM_PFVF(SERVER_END); param[3] = FW_PARAM_PFVF(TDDP_START); param[4] = FW_PARAM_PFVF(TDDP_END); param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query TOE parameters: %d.\n", rc); return (rc); } sc->tids.ntids = val[0]; if (sc->params.fw_vers < (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) | V_FW_HDR_FW_VER_MICRO(5) | V_FW_HDR_FW_VER_BUILD(0))) { MPASS(sc->tids.ntids >= sc->tids.nhpftids); sc->tids.ntids -= sc->tids.nhpftids; } sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); if ((int)val[2] > (int)val[1]) { sc->tids.stid_base = val[1]; sc->tids.nstids = val[2] - val[1] + 1; } sc->vres.ddp.start = val[3]; sc->vres.ddp.size = val[4] - val[3] + 1; sc->params.ofldq_wr_cred = val[5]; sc->params.offload = 1; } else { /* * The firmware attempts memfree TOE configuration for -SO cards * and will report toecaps=0 if it runs out of resources (this * depends on the config file). It may not report 0 for other * capabilities dependent on the TOE in this case. Set them to * 0 here so that the driver doesn't bother tracking resources * that will never be used. 
*/ sc->iscsicaps = 0; sc->rdmacaps = 0; } if (sc->rdmacaps) { param[0] = FW_PARAM_PFVF(STAG_START); param[1] = FW_PARAM_PFVF(STAG_END); param[2] = FW_PARAM_PFVF(RQ_START); param[3] = FW_PARAM_PFVF(RQ_END); param[4] = FW_PARAM_PFVF(PBL_START); param[5] = FW_PARAM_PFVF(PBL_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(1): %d.\n", rc); return (rc); } sc->vres.stag.start = val[0]; sc->vres.stag.size = val[1] - val[0] + 1; sc->vres.rq.start = val[2]; sc->vres.rq.size = val[3] - val[2] + 1; sc->vres.pbl.start = val[4]; sc->vres.pbl.size = val[5] - val[4] + 1; param[0] = FW_PARAM_PFVF(SQRQ_START); param[1] = FW_PARAM_PFVF(SQRQ_END); param[2] = FW_PARAM_PFVF(CQ_START); param[3] = FW_PARAM_PFVF(CQ_END); param[4] = FW_PARAM_PFVF(OCQ_START); param[5] = FW_PARAM_PFVF(OCQ_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(2): %d.\n", rc); return (rc); } sc->vres.qp.start = val[0]; sc->vres.qp.size = val[1] - val[0] + 1; sc->vres.cq.start = val[2]; sc->vres.cq.size = val[3] - val[2] + 1; sc->vres.ocq.start = val[4]; sc->vres.ocq.size = val[5] - val[4] + 1; param[0] = FW_PARAM_PFVF(SRQ_START); param[1] = FW_PARAM_PFVF(SRQ_END); param[2] = FW_PARAM_DEV(MAXORDIRD_QP); param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(3): %d.\n", rc); return (rc); } sc->vres.srq.start = val[0]; sc->vres.srq.size = val[1] - val[0] + 1; sc->params.max_ordird_qp = val[2]; sc->params.max_ird_adapter = val[3]; } if (sc->iscsicaps) { param[0] = FW_PARAM_PFVF(ISCSI_START); param[1] = FW_PARAM_PFVF(ISCSI_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query iSCSI parameters: %d.\n", rc); return (rc); } sc->vres.iscsi.start = val[0]; sc->vres.iscsi.size = val[1] - val[0] + 1; } if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { param[0] = FW_PARAM_PFVF(TLS_START); param[1] = FW_PARAM_PFVF(TLS_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query TLS parameters: %d.\n", rc); return (rc); } sc->vres.key.start = val[0]; sc->vres.key.size = val[1] - val[0] + 1; } t4_init_sge_params(sc); /* * We've got the params we wanted to query via the firmware. Now grab * some others directly from the chip. */ rc = t4_read_chip_settings(sc); return (rc); } static int set_params__post_init(struct adapter *sc) { uint32_t param, val; #ifdef TCP_OFFLOAD int i, v, shift; #endif /* ask for encapsulated CPLs */ param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); val = 1; (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); /* Enable 32b port caps if the firmware supports it. */ param = FW_PARAM_PFVF(PORT_CAPS32); val = 1; if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val) == 0) sc->params.port_caps32 = 1; #ifdef TCP_OFFLOAD /* * Override the TOE timers with user provided tunables. This is not the * recommended way to change the timers (the firmware config file is) so * these tunables are not documented. * * All the timer tunables are in microseconds. 
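 *
 * For example, forcing the keepalive idle time to ten minutes from
 * loader.conf (tunable name assumed to match the t4_toe_keepalive_idle
 * knob declared earlier in this file):
 *
 *	hw.cxgbe.toe.keepalive_idle="600000000"
 *
 * The value is converted to hardware ticks with us_to_tcp_ticks() and
 * masked to the width of the KEEPALIVEIDLE field before it is written.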
*/ if (t4_toe_keepalive_idle != 0) { v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle); v &= M_KEEPALIVEIDLE; t4_set_reg_field(sc, A_TP_KEEP_IDLE, V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v)); } if (t4_toe_keepalive_interval != 0) { v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval); v &= M_KEEPALIVEINTVL; t4_set_reg_field(sc, A_TP_KEEP_INTVL, V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v)); } if (t4_toe_keepalive_count != 0) { v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2; t4_set_reg_field(sc, A_TP_SHIFT_CNT, V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) | V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2), V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v)); } if (t4_toe_rexmt_min != 0) { v = us_to_tcp_ticks(sc, t4_toe_rexmt_min); v &= M_RXTMIN; t4_set_reg_field(sc, A_TP_RXT_MIN, V_RXTMIN(M_RXTMIN), V_RXTMIN(v)); } if (t4_toe_rexmt_max != 0) { v = us_to_tcp_ticks(sc, t4_toe_rexmt_max); v &= M_RXTMAX; t4_set_reg_field(sc, A_TP_RXT_MAX, V_RXTMAX(M_RXTMAX), V_RXTMAX(v)); } if (t4_toe_rexmt_count != 0) { v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2; t4_set_reg_field(sc, A_TP_SHIFT_CNT, V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) | V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2), V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v)); } for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) { if (t4_toe_rexmt_backoff[i] != -1) { v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0; shift = (i & 3) << 3; t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3), M_TIMERBACKOFFINDEX0 << shift, v << shift); } } #endif return (0); } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV static void t4_set_desc(struct adapter *sc) { char buf[128]; struct adapter_params *p = &sc->params; snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); device_set_desc_copy(sc->dev, buf); } static inline void ifmedia_add4(struct ifmedia *ifm, int m) { ifmedia_add(ifm, m, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL); } /* * This is the selected media, which is not quite the same as the active media. * The media line in ifconfig is "media: Ethernet selected (active)" if selected * and active are not the same, and "media: Ethernet selected" otherwise. */ static void set_current_media(struct port_info *pi) { struct link_config *lc; struct ifmedia *ifm; int mword; u_int speed; PORT_LOCK_ASSERT_OWNED(pi); /* Leave current media alone if it's already set to IFM_NONE. */ ifm = &pi->media; if (ifm->ifm_cur != NULL && IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE) return; lc = &pi->link_cfg; if (lc->requested_aneg != AUTONEG_DISABLE && lc->supported & FW_PORT_CAP32_ANEG) { ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); return; } mword = IFM_ETHER | IFM_FDX; if (lc->requested_fc & PAUSE_TX) mword |= IFM_ETH_TXPAUSE; if (lc->requested_fc & PAUSE_RX) mword |= IFM_ETH_RXPAUSE; if (lc->requested_speed == 0) speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */ else speed = lc->requested_speed; mword |= port_mword(pi, speed_to_fwcap(speed)); ifmedia_set(ifm, mword); } /* * Returns true if the ifmedia list for the port cannot change. 
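 *
 * These are all BASE-T and backplane port types whose media does not
 * depend on a pluggable transceiver; once FIXED_IFMEDIA is set for
 * such a port, build_medialist() returns early and never rebuilds the
 * list.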
*/ static bool fixed_ifmedia(struct port_info *pi) { return (pi->port_type == FW_PORT_TYPE_BT_SGMII || pi->port_type == FW_PORT_TYPE_BT_XFI || pi->port_type == FW_PORT_TYPE_BT_XAUI || pi->port_type == FW_PORT_TYPE_KX4 || pi->port_type == FW_PORT_TYPE_KX || pi->port_type == FW_PORT_TYPE_KR || pi->port_type == FW_PORT_TYPE_BP_AP || pi->port_type == FW_PORT_TYPE_BP4_AP || pi->port_type == FW_PORT_TYPE_BP40_BA || pi->port_type == FW_PORT_TYPE_KR4_100G || pi->port_type == FW_PORT_TYPE_KR_SFP28 || pi->port_type == FW_PORT_TYPE_KR_XLAUI); } static void build_medialist(struct port_info *pi) { uint32_t ss, speed; int unknown, mword, bit; struct link_config *lc; struct ifmedia *ifm; PORT_LOCK_ASSERT_OWNED(pi); if (pi->flags & FIXED_IFMEDIA) return; /* * Rebuild the ifmedia list. */ ifm = &pi->media; ifmedia_removeall(ifm); lc = &pi->link_cfg; ss = G_FW_PORT_CAP32_SPEED(lc->supported); /* Supported Speeds */ if (__predict_false(ss == 0)) { /* not supposed to happen. */ MPASS(ss != 0); no_media: MPASS(LIST_EMPTY(&ifm->ifm_list)); ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL); ifmedia_set(ifm, IFM_ETHER | IFM_NONE); return; } unknown = 0; for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) { speed = 1 << bit; MPASS(speed & M_FW_PORT_CAP32_SPEED); if (ss & speed) { mword = port_mword(pi, speed); if (mword == IFM_NONE) { goto no_media; } else if (mword == IFM_UNKNOWN) unknown++; else ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword); } } if (unknown > 0) /* Add one unknown for all unknown media types. */ ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN); if (lc->supported & FW_PORT_CAP32_ANEG) ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL); set_current_media(pi); } /* * Initialize the requested fields in the link config based on driver tunables. */ static void init_link_config(struct port_info *pi) { struct link_config *lc = &pi->link_cfg; PORT_LOCK_ASSERT_OWNED(pi); lc->requested_speed = 0; if (t4_autoneg == 0) lc->requested_aneg = AUTONEG_DISABLE; else if (t4_autoneg == 1) lc->requested_aneg = AUTONEG_ENABLE; else lc->requested_aneg = AUTONEG_AUTO; lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG); if (t4_fec == -1 || t4_fec & FEC_AUTO) lc->requested_fec = FEC_AUTO; else { lc->requested_fec = FEC_NONE; if (t4_fec & FEC_RS) lc->requested_fec |= FEC_RS; if (t4_fec & FEC_BASER_RS) lc->requested_fec |= FEC_BASER_RS; } } /* * Makes sure that all requested settings comply with what's supported by the * port. Returns the number of settings that were invalid and had to be fixed. 
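 *
 * A typical caller holds the port lock, refreshes the port info, fixes
 * up the requested settings, and then applies them, as in
 * cxgbe_init_synchronized() when the first VI comes up:
 *
 *	PORT_LOCK(pi);
 *	t4_update_port_info(pi);
 *	fixup_link_config(pi);
 *	build_medialist(pi);
 *	apply_link_config(pi);
 *	PORT_UNLOCK(pi);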
*/ static int fixup_link_config(struct port_info *pi) { int n = 0; struct link_config *lc = &pi->link_cfg; uint32_t fwspeed; PORT_LOCK_ASSERT_OWNED(pi); /* Speed (when not autonegotiating) */ if (lc->requested_speed != 0) { fwspeed = speed_to_fwcap(lc->requested_speed); if ((fwspeed & lc->supported) == 0) { n++; lc->requested_speed = 0; } } /* Link autonegotiation */ MPASS(lc->requested_aneg == AUTONEG_ENABLE || lc->requested_aneg == AUTONEG_DISABLE || lc->requested_aneg == AUTONEG_AUTO); if (lc->requested_aneg == AUTONEG_ENABLE && !(lc->supported & FW_PORT_CAP32_ANEG)) { n++; lc->requested_aneg = AUTONEG_AUTO; } /* Flow control */ MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0); if (lc->requested_fc & PAUSE_TX && !(lc->supported & FW_PORT_CAP32_FC_TX)) { n++; lc->requested_fc &= ~PAUSE_TX; } if (lc->requested_fc & PAUSE_RX && !(lc->supported & FW_PORT_CAP32_FC_RX)) { n++; lc->requested_fc &= ~PAUSE_RX; } if (!(lc->requested_fc & PAUSE_AUTONEG) && !(lc->supported & FW_PORT_CAP32_FORCE_PAUSE)) { n++; lc->requested_fc |= PAUSE_AUTONEG; } /* FEC */ if ((lc->requested_fec & FEC_RS && !(lc->supported & FW_PORT_CAP32_FEC_RS)) || (lc->requested_fec & FEC_BASER_RS && !(lc->supported & FW_PORT_CAP32_FEC_BASER_RS))) { n++; lc->requested_fec = FEC_AUTO; } return (n); } /* * Apply the requested L1 settings, which are expected to be valid, to the * hardware. */ static int apply_link_config(struct port_info *pi) { struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; #ifdef INVARIANTS ASSERT_SYNCHRONIZED_OP(sc); PORT_LOCK_ASSERT_OWNED(pi); if (lc->requested_aneg == AUTONEG_ENABLE) MPASS(lc->supported & FW_PORT_CAP32_ANEG); if (!(lc->requested_fc & PAUSE_AUTONEG)) MPASS(lc->supported & FW_PORT_CAP32_FORCE_PAUSE); if (lc->requested_fc & PAUSE_TX) MPASS(lc->supported & FW_PORT_CAP32_FC_TX); if (lc->requested_fc & PAUSE_RX) MPASS(lc->supported & FW_PORT_CAP32_FC_RX); if (lc->requested_fec & FEC_RS) MPASS(lc->supported & FW_PORT_CAP32_FEC_RS); if (lc->requested_fec & FEC_BASER_RS) MPASS(lc->supported & FW_PORT_CAP32_FEC_BASER_RS); #endif rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); if (rc != 0) { /* Don't complain if the VF driver gets back an EPERM. */ if (!(sc->flags & IS_VF) || rc != FW_EPERM) device_printf(pi->dev, "l1cfg failed: %d\n", rc); } else { /* * An L1_CFG will almost always result in a link-change event if * the link is up, and the driver will refresh the actual * fec/fc/etc. when the notification is processed. If the link * is down then the actual settings are meaningless. * * This takes care of the case where a change in the L1 settings * may not result in a notification. */ if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG)) lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX); } return (rc); } #define FW_MAC_EXACT_CHUNK 7 /* * Program the port's XGMAC based on parameters in ifnet. The caller also * indicates which parameters should be programmed (the rest are left alone). */ int update_mac_settings(struct ifnet *ifp, int flags) { int rc = 0; struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; ASSERT_SYNCHRONIZED_OP(sc); KASSERT(flags, ("%s: not told what to update.", __func__)); if (flags & XGMAC_MTU) mtu = ifp->if_mtu; if (flags & XGMAC_PROMISC) promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; if (flags & XGMAC_ALLMULTI) allmulti = ifp->if_flags & IFF_ALLMULTI ? 
1 : 0; if (flags & XGMAC_VLANEX) vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, allmulti, 1, vlanex, false); if (rc) { if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc); return (rc); } } if (flags & XGMAC_UCADDR) { uint8_t ucaddr[ETHER_ADDR_LEN]; bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, - ucaddr, true, true); + ucaddr, true, &vi->smt_idx); if (rc < 0) { rc = -rc; if_printf(ifp, "change_mac failed: %d\n", rc); return (rc); } else { vi->xact_addr_filt = rc; rc = 0; } } if (flags & XGMAC_MCADDRS) { const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; int del = 1; uint64_t hash = 0; struct ifmultiaddr *ifma; int i = 0, j; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcaddr[i] = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); MPASS(ETHER_IS_MULTICAST(mcaddr[i])); i++; if (i == FW_MAC_EXACT_CHUNK) { rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, mcaddr, NULL, &hash, 0); if (rc < 0) { rc = -rc; for (j = 0; j < i; j++) { if_printf(ifp, "failed to add mc address" " %02x:%02x:%02x:" "%02x:%02x:%02x rc=%d\n", mcaddr[j][0], mcaddr[j][1], mcaddr[j][2], mcaddr[j][3], mcaddr[j][4], mcaddr[j][5], rc); } goto mcfail; } del = 0; i = 0; } } if (i > 0) { rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, mcaddr, NULL, &hash, 0); if (rc < 0) { rc = -rc; for (j = 0; j < i; j++) { if_printf(ifp, "failed to add mc address" " %02x:%02x:%02x:" "%02x:%02x:%02x rc=%d\n", mcaddr[j][0], mcaddr[j][1], mcaddr[j][2], mcaddr[j][3], mcaddr[j][4], mcaddr[j][5], rc); } goto mcfail; } } rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); if (rc != 0) if_printf(ifp, "failed to set mc address hash: %d", rc); mcfail: if_maddr_runlock(ifp); } return (rc); } /* * {begin|end}_synchronized_op must be called from the same thread. */ int begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, char *wmesg) { int rc, pri; #ifdef WITNESS /* the caller thinks it's ok to sleep, but is it really? */ if (flags & SLEEP_OK) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "begin_synchronized_op"); #endif if (INTR_OK) pri = PCATCH; else pri = 0; ADAPTER_LOCK(sc); for (;;) { if (vi && IS_DOOMED(vi)) { rc = ENXIO; goto done; } if (!IS_BUSY(sc)) { rc = 0; break; } if (!(flags & SLEEP_OK)) { rc = EBUSY; goto done; } if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { rc = EINTR; goto done; } } KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); SET_BUSY(sc); #ifdef INVARIANTS sc->last_op = wmesg; sc->last_op_thr = curthread; sc->last_op_flags = flags; #endif done: if (!(flags & HOLD_LOCK) || rc) ADAPTER_UNLOCK(sc); return (rc); } /* * Tell if_ioctl and if_init that the VI is going away. This is * special variant of begin_synchronized_op and must be paired with a * call to end_synchronized_op. */ void doom_vi(struct adapter *sc, struct vi_info *vi) { ADAPTER_LOCK(sc); SET_DOOMED(vi); wakeup(&sc->flags); while (IS_BUSY(sc)) mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); SET_BUSY(sc); #ifdef INVARIANTS sc->last_op = "t4detach"; sc->last_op_thr = curthread; sc->last_op_flags = 0; #endif ADAPTER_UNLOCK(sc); } /* * {begin|end}_synchronized_op must be called from the same thread. 
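 *
 * Typical usage, with a hypothetical "t4xyz" wmesg in the style used
 * throughout this file:
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc != 0)
 *		return (rc);
 *	... operate on the adapter or VI ...
 *	end_synchronized_op(sc, 0);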
*/ void end_synchronized_op(struct adapter *sc, int flags) { if (flags & LOCK_HELD) ADAPTER_LOCK_ASSERT_OWNED(sc); else ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); wakeup(&sc->flags); ADAPTER_UNLOCK(sc); } static int cxgbe_init_synchronized(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifnet *ifp = vi->ifp; int rc = 0, i; struct sge_txq *txq; ASSERT_SYNCHRONIZED_OP(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return (0); /* already running */ if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_full_init(sc)) != 0)) return (rc); /* error message displayed already */ if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_full_init(vi)) != 0)) return (rc); /* error message displayed already */ rc = update_mac_settings(ifp, XGMAC_ALL); if (rc) goto done; /* error message displayed already */ PORT_LOCK(pi); if (pi->up_vis == 0) { t4_update_port_info(pi); fixup_link_config(pi); build_medialist(pi); apply_link_config(pi); } rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); if (rc != 0) { if_printf(ifp, "enable_vi failed: %d\n", rc); PORT_UNLOCK(pi); goto done; } /* * Can't fail from this point onwards. Review cxgbe_uninit_synchronized * if this changes. */ for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_ENABLED; TXQ_UNLOCK(txq); } /* * The first iq of the first port to come up is used for tracing. */ if (sc->traceq < 0 && IS_MAIN_VI(vi)) { sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | V_QUEUENUMBER(sc->traceq)); pi->flags |= HAS_TRACEQ; } /* all ok */ pi->up_vis++; ifp->if_drv_flags |= IFF_DRV_RUNNING; if (pi->nvi > 1 || sc->flags & IS_VF) callout_reset(&vi->tick, hz, vi_tick, vi); else callout_reset(&pi->tick, hz, cxgbe_tick, pi); if (pi->link_cfg.link_ok) t4_os_link_changed(pi); PORT_UNLOCK(pi); done: if (rc != 0) cxgbe_uninit_synchronized(vi); return (rc); } /* * Idempotent. */ static int cxgbe_uninit_synchronized(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifnet *ifp = vi->ifp; int rc, i; struct sge_txq *txq; ASSERT_SYNCHRONIZED_OP(sc); if (!(vi->flags & VI_INIT_DONE)) { if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) { KASSERT(0, ("uninited VI is running")); if_printf(ifp, "uninited VI with running ifnet. " "vi->flags 0x%016lx, if_flags 0x%08x, " "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags, ifp->if_drv_flags); } return (0); } /* * Disable the VI so that all its data in either direction is discarded * by the MPS. Leave everything else (the queues, interrupts, and 1Hz * tick) intact as the TP can deliver negative advice or data that it's * holding in its RAM (for an offloaded connection) even after the VI is * disabled. 
*/ rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); if (rc) { if_printf(ifp, "disable_vi failed: %d\n", rc); return (rc); } for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags &= ~EQ_ENABLED; TXQ_UNLOCK(txq); } PORT_LOCK(pi); if (pi->nvi > 1 || sc->flags & IS_VF) callout_stop(&vi->tick); else callout_stop(&pi->tick); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { PORT_UNLOCK(pi); return (0); } ifp->if_drv_flags &= ~IFF_DRV_RUNNING; pi->up_vis--; if (pi->up_vis > 0) { PORT_UNLOCK(pi); return (0); } pi->link_cfg.link_ok = false; pi->link_cfg.speed = 0; pi->link_cfg.link_down_rc = 255; t4_os_link_changed(pi); PORT_UNLOCK(pi); return (0); } /* * It is ok for this function to fail midway and return right away. t4_detach * will walk the entire sc->irq list and clean up whatever is valid. */ int t4_setup_intr_handlers(struct adapter *sc) { int rc, rid, p, q, v; char s[8]; struct irq *irq; struct port_info *pi; struct vi_info *vi; struct sge *sge = &sc->sge; struct sge_rxq *rxq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #ifdef DEV_NETMAP struct sge_nm_rxq *nm_rxq; #endif #ifdef RSS int nbuckets = rss_getnumbuckets(); #endif /* * Setup interrupts. */ irq = &sc->irq[0]; rid = sc->intr_type == INTR_INTX ? 0 : 1; if (forwarding_intr_to_fwq(sc)) return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); /* Multiple interrupts. */ if (sc->flags & IS_VF) KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, ("%s: too few intr.", __func__)); else KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, ("%s: too few intr.", __func__)); /* The first one is always error intr on PFs */ if (!(sc->flags & IS_VF)) { rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); if (rc != 0) return (rc); irq++; rid++; } /* The second one is always the firmware event queue (first on VFs) */ rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); if (rc != 0) return (rc); irq++; rid++; for_each_port(sc, p) { pi = sc->port[p]; for_each_vi(pi, v, vi) { vi->first_intr = rid - 1; if (vi->nnmrxq > 0) { int n = max(vi->nrxq, vi->nnmrxq); rxq = &sge->rxq[vi->first_rxq]; #ifdef DEV_NETMAP nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; #endif for (q = 0; q < n; q++) { snprintf(s, sizeof(s), "%x%c%x", p, 'a' + v, q); if (q < vi->nrxq) irq->rxq = rxq++; #ifdef DEV_NETMAP if (q < vi->nnmrxq) irq->nm_rxq = nm_rxq++; if (irq->nm_rxq != NULL && irq->rxq == NULL) { /* Netmap rx only */ rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr, irq->nm_rxq, s); } if (irq->nm_rxq != NULL && irq->rxq != NULL) { /* NIC and Netmap rx */ rc = t4_alloc_irq(sc, irq, rid, t4_vi_intr, irq, s); } #endif if (irq->rxq != NULL && irq->nm_rxq == NULL) { /* NIC rx only */ rc = t4_alloc_irq(sc, irq, rid, t4_intr, irq->rxq, s); } if (rc != 0) return (rc); #ifdef RSS if (q < vi->nrxq) { bus_bind_intr(sc->dev, irq->res, rss_getcpu(q % nbuckets)); } #endif irq++; rid++; vi->nintr++; } } else { for_each_rxq(vi, q, rxq) { snprintf(s, sizeof(s), "%x%c%x", p, 'a' + v, q); rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq, s); if (rc != 0) return (rc); #ifdef RSS bus_bind_intr(sc->dev, irq->res, rss_getcpu(q % nbuckets)); #endif irq++; rid++; vi->nintr++; } } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, q, ofld_rxq) { snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q); rc = t4_alloc_irq(sc, irq, rid, t4_intr, ofld_rxq, s); if (rc != 0) return (rc); irq++; rid++; vi->nintr++; } #endif } } MPASS(irq == &sc->irq[sc->intr_count]); return (0); } int adapter_full_init(struct adapter *sc) { int rc, i; #ifdef RSS uint32_t raw_rss_key[RSS_KEYSIZE / 
sizeof(uint32_t)]; uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; #endif ASSERT_SYNCHRONIZED_OP(sc); ADAPTER_LOCK_ASSERT_NOTOWNED(sc); KASSERT((sc->flags & FULL_INIT_DONE) == 0, ("%s: FULL_INIT_DONE already", __func__)); /* * queues that belong to the adapter (not any particular port). */ rc = t4_setup_adapter_queues(sc); if (rc != 0) goto done; for (i = 0; i < nitems(sc->tq); i++) { sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->tq[i]); if (sc->tq[i] == NULL) { device_printf(sc->dev, "failed to allocate task queue %d\n", i); rc = ENOMEM; goto done; } taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", device_get_nameunit(sc->dev), i); } #ifdef RSS MPASS(RSS_KEYSIZE == 40); rss_getkey((void *)&raw_rss_key[0]); for (i = 0; i < nitems(rss_key); i++) { rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); } t4_write_rss_key(sc, &rss_key[0], -1, 1); #endif if (!(sc->flags & IS_VF)) t4_intr_enable(sc); sc->flags |= FULL_INIT_DONE; done: if (rc != 0) adapter_full_uninit(sc); return (rc); } int adapter_full_uninit(struct adapter *sc) { int i; ADAPTER_LOCK_ASSERT_NOTOWNED(sc); t4_teardown_adapter_queues(sc); for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { taskqueue_free(sc->tq[i]); sc->tq[i] = NULL; } sc->flags &= ~FULL_INIT_DONE; return (0); } #ifdef RSS #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ RSS_HASHTYPE_RSS_UDP_IPV6) /* Translates kernel hash types to hardware. */ static int hashconfig_to_hashen(int hashconfig) { int hashen = 0; if (hashconfig & RSS_HASHTYPE_RSS_IPV4) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_IPV6) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; } if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; } if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; return (hashen); } /* Translates hardware hash types to kernel. */ static int hashen_to_hashconfig(int hashen) { int hashconfig = 0; if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { /* * If UDP hashing was enabled it must have been enabled for * either IPv4 or IPv6 (inclusive or). Enabling UDP without * enabling any 4-tuple hash is nonsense configuration. 
*/ MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; } if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) hashconfig |= RSS_HASHTYPE_RSS_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) hashconfig |= RSS_HASHTYPE_RSS_IPV6; return (hashconfig); } #endif int vi_full_init(struct vi_info *vi) { struct adapter *sc = vi->pi->adapter; struct ifnet *ifp = vi->ifp; uint16_t *rss; struct sge_rxq *rxq; int rc, i, j, hashen; #ifdef RSS int nbuckets = rss_getnumbuckets(); int hashconfig = rss_gethashconfig(); int extra; #endif ASSERT_SYNCHRONIZED_OP(sc); KASSERT((vi->flags & VI_INIT_DONE) == 0, ("%s: VI_INIT_DONE already", __func__)); sysctl_ctx_init(&vi->ctx); vi->flags |= VI_SYSCTL_CTX; /* * Allocate tx/rx/fl queues for this VI. */ rc = t4_setup_vi_queues(vi); if (rc != 0) goto done; /* error message displayed already */ /* * Setup RSS for this VI. Save a copy of the RSS table for later use. */ if (vi->nrxq > vi->rss_size) { if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " "some queues will never receive traffic.\n", vi->nrxq, vi->rss_size); } else if (vi->rss_size % vi->nrxq) { if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " "expect uneven traffic distribution.\n", vi->nrxq, vi->rss_size); } #ifdef RSS if (vi->nrxq != nbuckets) { if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" "performance will be impacted.\n", vi->nrxq, nbuckets); } #endif rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); for (i = 0; i < vi->rss_size;) { #ifdef RSS j = rss_get_indirection_to_bucket(i); j %= vi->nrxq; rxq = &sc->sge.rxq[vi->first_rxq + j]; rss[i++] = rxq->iq.abs_id; #else for_each_rxq(vi, j, rxq) { rss[i++] = rxq->iq.abs_id; if (i == vi->rss_size) break; } #endif } rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, vi->rss_size); if (rc != 0) { free(rss, M_CXGBE); if_printf(ifp, "rss_config failed: %d\n", rc); goto done; } #ifdef RSS hashen = hashconfig_to_hashen(hashconfig); /* * We may have had to enable some hashes even though the global config * wants them disabled. This is a potential problem that must be * reported to the user. */ extra = hashen_to_hashconfig(hashen) ^ hashconfig; /* * If we consider only the supported hash types, then the enabled hashes * are a superset of the requested hashes. In other words, there cannot * be any supported hash that was requested but not enabled, but there * can be hashes that were not requested but had to be enabled. 
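 *
 * For example, if the kernel requests UDP/IPv4 hashing but not
 * TCP/IPv4, hashconfig_to_hashen() must still set
 * F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (UDP hashing rides on the
 * 4-tuple enable), and hashen_to_hashconfig() then reports
 * RSS_HASHTYPE_RSS_TCP_IPV4 as enabled as well.  It ends up in
 * "extra" and is logged as forced on.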
*/ extra &= SUPPORTED_RSS_HASHTYPES; MPASS((extra & hashconfig) == 0); if (extra) { if_printf(ifp, "global RSS config (0x%x) cannot be accommodated.\n", hashconfig); } if (extra & RSS_HASHTYPE_RSS_IPV4) if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_IPV6) if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); #else hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; #endif rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0); if (rc != 0) { free(rss, M_CXGBE); if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); goto done; } vi->rss = rss; vi->flags |= VI_INIT_DONE; done: if (rc != 0) vi_full_uninit(vi); return (rc); } /* * Idempotent. */ int vi_full_uninit(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; int i; struct sge_rxq *rxq; struct sge_txq *txq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; struct sge_wrq *ofld_txq; #endif if (vi->flags & VI_INIT_DONE) { /* Need to quiesce queues. */ /* XXX: Only for the first VI? */ if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); for_each_txq(vi, i, txq) { quiesce_txq(sc, txq); } #ifdef TCP_OFFLOAD for_each_ofld_txq(vi, i, ofld_txq) { quiesce_wrq(sc, ofld_txq); } #endif for_each_rxq(vi, i, rxq) { quiesce_iq(sc, &rxq->iq); quiesce_fl(sc, &rxq->fl); } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { quiesce_iq(sc, &ofld_rxq->iq); quiesce_fl(sc, &ofld_rxq->fl); } #endif free(vi->rss, M_CXGBE); free(vi->nm_rss, M_CXGBE); } t4_teardown_vi_queues(vi); vi->flags &= ~VI_INIT_DONE; return (0); } static void quiesce_txq(struct adapter *sc, struct sge_txq *txq) { struct sge_eq *eq = &txq->eq; struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; (void) sc; /* unused */ #ifdef INVARIANTS TXQ_LOCK(txq); MPASS((eq->flags & EQ_ENABLED) == 0); TXQ_UNLOCK(txq); #endif /* Wait for the mp_ring to empty. */ while (!mp_ring_is_idle(txq->r)) { mp_ring_check_drainage(txq->r, 0); pause("rquiesce", 1); } /* Then wait for the hardware to finish. */ while (spg->cidx != htobe16(eq->pidx)) pause("equiesce", 1); /* Finally, wait for the driver to reclaim all descriptors. 
*/ while (eq->cidx != eq->pidx) pause("dquiesce", 1); } static void quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) { /* XXXTX */ } static void quiesce_iq(struct adapter *sc, struct sge_iq *iq) { (void) sc; /* unused */ /* Synchronize with the interrupt handler */ while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) pause("iqfree", 1); } static void quiesce_fl(struct adapter *sc, struct sge_fl *fl) { mtx_lock(&sc->sfl_lock); FL_LOCK(fl); fl->flags |= FL_DOOMED; FL_UNLOCK(fl); callout_stop(&sc->sfl_callout); mtx_unlock(&sc->sfl_lock); KASSERT((fl->flags & FL_STARVING) == 0, ("%s: still starving", __func__)); } static int t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, driver_intr_t *handler, void *arg, char *name) { int rc; irq->rid = rid; irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, RF_SHAREABLE | RF_ACTIVE); if (irq->res == NULL) { device_printf(sc->dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, NULL, handler, arg, &irq->tag); if (rc != 0) { device_printf(sc->dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name, rc); } else if (name) bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); return (rc); } static int t4_free_irq(struct adapter *sc, struct irq *irq) { if (irq->tag) bus_teardown_intr(sc->dev, irq->res, irq->tag); if (irq->res) bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); bzero(irq, sizeof(*irq)); return (0); } static void get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) { regs->version = chip_id(sc) | chip_rev(sc) << 10; t4_get_regs(sc, buf, regs->len); } #define A_PL_INDIR_CMD 0x1f8 #define S_PL_AUTOINC 31 #define M_PL_AUTOINC 0x1U #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) #define S_PL_VFID 20 #define M_PL_VFID 0xffU #define V_PL_VFID(x) ((x) << S_PL_VFID) #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) #define S_PL_ADDR 0 #define M_PL_ADDR 0xfffffU #define V_PL_ADDR(x) ((x) << S_PL_ADDR) #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) #define A_PL_INDIR_DATA 0x1fc static uint64_t -read_vf_stat(struct adapter *sc, unsigned int viid, int reg) +read_vf_stat(struct adapter *sc, u_int vin, int reg) { u32 stats[2]; mtx_assert(&sc->reg_lock, MA_OWNED); if (sc->flags & IS_VF) { stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); } else { t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | - V_PL_VFID(G_FW_VIID_VIN(viid)) | - V_PL_ADDR(VF_MPS_REG(reg))); + V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg))); stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); } return (((uint64_t)stats[1]) << 32 | stats[0]); } static void -t4_get_vi_stats(struct adapter *sc, unsigned int viid, - struct fw_vi_stats_vf *stats) +t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats) { #define GET_STAT(name) \ - read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) + read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L) stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); stats->tx_offload_bytes = 
GET_STAT(TX_VF_OFFLOAD_BYTES); stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); #undef GET_STAT } static void -t4_clr_vi_stats(struct adapter *sc, unsigned int viid) +t4_clr_vi_stats(struct adapter *sc, u_int vin) { int reg; - t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | - V_PL_VFID(G_FW_VIID_VIN(viid)) | + t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) t4_write_reg(sc, A_PL_INDIR_DATA, 0); } static void vi_refresh_stats(struct adapter *sc, struct vi_info *vi) { struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ if (!(vi->flags & VI_INIT_DONE)) return; getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &vi->last_refreshed, <)) return; mtx_lock(&sc->reg_lock); - t4_get_vi_stats(sc, vi->viid, &vi->stats); + t4_get_vi_stats(sc, vi->vin, &vi->stats); getmicrotime(&vi->last_refreshed); mtx_unlock(&sc->reg_lock); } static void cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) { u_int i, v, tnl_cong_drops, bg_map; struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &pi->last_refreshed, <)) return; tnl_cong_drops = 0; t4_get_port_stats(sc, pi->tx_chan, &pi->stats); bg_map = pi->mps_bg_map; while (bg_map) { i = ffs(bg_map) - 1; mtx_lock(&sc->reg_lock); t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, A_TP_MIB_TNL_CNG_DROP_0 + i); mtx_unlock(&sc->reg_lock); tnl_cong_drops += v; bg_map &= ~(1 << i); } pi->tnl_cong_drops = tnl_cong_drops; getmicrotime(&pi->last_refreshed); } static void cxgbe_tick(void *arg) { struct port_info *pi = arg; struct adapter *sc = pi->adapter; PORT_LOCK_ASSERT_OWNED(pi); cxgbe_refresh_stats(sc, pi); callout_schedule(&pi->tick, hz); } void vi_tick(void *arg) { struct vi_info *vi = arg; struct adapter *sc = vi->pi->adapter; vi_refresh_stats(sc, vi); callout_schedule(&vi->tick, hz); } /* * Should match fw_caps_config_ enums in t4fw_interface.h */ static char *caps_decoder[] = { "\20\001IPMI\002NCSI", /* 0: NBM */ "\20\001PPP\002QFC\003DCBX", /* 1: link */ "\20\001INGRESS\002EGRESS", /* 2: switch */ "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ "\006HASHFILTER\007ETHOFLD", "\20\001TOE", /* 4: TOE */ "\20\001RDDP\002RDMAC", /* 5: RDMA */ "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" "\007T10DIF" "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ "\004PO_INITIATOR\005PO_TARGET", }; void t4_sysctls(struct adapter *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children, *c0; static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; ctx = device_get_sysctl_ctx(sc->dev); /* * dev.t4nex.X. 
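 *
 * These live under the nexus device (t5nex/t6nex on the newer chips),
 * e.g.:
 *
 *	# sysctl dev.t4nex.0.core_clock
 *	# sysctl dev.t4nex.0.cf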
*/ oid = device_get_sysctl_tree(sc->dev); c0 = children = SYSCTL_CHILDREN(oid); sc->sc_do_rxcopy = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, sc->params.nports, "# of ports"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", CTLTYPE_STRING | CTLFLAG_RD, doorbells, (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A", "available doorbells"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, sc->params.vpd.cclk, "core clock frequency (in KHz)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", "interrupt holdoff timer values (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", "interrupt holdoff packet counter values"); t4_sge_sysctls(sc, ctx, children); sc->lro_timeout = 100; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, &sc->debug_flags, 0, "flags to enable runtime debugging"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, 0, "firmware version"); if (sc->flags & IS_VF) return; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, NULL, chip_rev(sc), "chip hardware revision"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version", CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, sc->er_version, 0, "expansion ROM version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, sc->bs_version, 0, "bootstrap firmware version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, NULL, sc->params.scfg_vers, "serial config version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, NULL, sc->params.vpd_vers, "VPD version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, sc->cfcsum, "config file checksum"); #define SYSCTL_CAP(name, n, text) \ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], (uintptr_t)&sc->name, \ sysctl_bitfield_16b, "A", "available " text " capabilities") SYSCTL_CAP(nbmcaps, 0, "NBM"); SYSCTL_CAP(linkcaps, 1, "link"); SYSCTL_CAP(switchcaps, 2, "switch"); SYSCTL_CAP(niccaps, 3, "NIC"); SYSCTL_CAP(toecaps, 4, "TCP offload"); SYSCTL_CAP(rdmacaps, 5, "RDMA"); SYSCTL_CAP(iscsicaps, 6, "iSCSI"); SYSCTL_CAP(cryptocaps, 7, "crypto"); SYSCTL_CAP(fcoecaps, 8, "FCoE"); #undef SYSCTL_CAP SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, NULL, 
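/*
 * [Editorial note -- not part of this commit.  The "\20\1UDB..." string
 * above (and the caps_decoder[] strings) use the printf(9) %b encoding
 * consumed by sysctl_bitfield_8b()/sysctl_bitfield_16b() later in this
 * file: the first octal character (\20 == 16) is the radix for the raw
 * value, and each following octal character names a 1-origin bit.  For
 * a doorbells value of 0x5 (bits 1 and 3 set):
 *
 *	sbuf_printf(sb, "%b", 5, "\20\1UDB\2WCWR\3UDBWC\4KDB");
 *	prints: 5<UDB,UDBWC>
 * ]
 */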
sc->tids.nftids, "number of filters"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD, sc, 0, sysctl_temperature, "I", "chip temperature (in Celsius)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_loadavg, "A", "microprocessor load averages (debug firmwares only)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_vdd", CTLFLAG_RD, &sc->params.core_vdd, 0, "core Vdd (in mV)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus", CTLTYPE_STRING | CTLFLAG_RD, sc, LOCAL_CPUS, sysctl_cpus, "A", "local CPUs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus", CTLTYPE_STRING | CTLFLAG_RD, sc, INTR_CPUS, sysctl_cpus, "A", "preferred CPUs for interrupts"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW, &sc->swintr, 0, "software triggered interrupts"); /* * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. */ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "logs and miscellaneous information"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cctrl, "A", "congestion control"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", CTLTYPE_STRING | CTLFLAG_RD, sc, 3, sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", CTLTYPE_STRING | CTLFLAG_RD, sc, 4, sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", CTLTYPE_STRING | CTLFLAG_RD, sc, 5, sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cim_la, "A", "CIM logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); if (chip_id(sc) > CHELSIO_T4) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", 
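/*
 * [Editorial note -- not part of this commit.  All of the cim_ibq_... and
 * cim_obq_... nodes share a single handler; arg2 carries a flat queue id
 * that sysctl_cim_ibq_obq() (defined below) splits apart again:
 *
 *	if (qid < CIM_NUM_IBQ)	-> inbound queue qid
 *	else			-> outbound queue qid - CIM_NUM_IBQ
 *
 * This is also why qname[] lists the six IBQ names first and the OBQ
 * names after them, so qname[arg2] labels either kind directly.]
 */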
"CIM OBQ 7 (SGE1-RX)"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cim_qcfg, "A", "CIM queue configuration"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cpl_stats, "A", "CPL statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_ddp_stats, "A", "non-TCP DDP statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_devlog, "A", "firmware's device log"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_fcoe_stats, "A", "FCoE statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_hw_sched, "A", "hardware scheduler "); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_l2t, "A", "hardware L2 table"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_smt, "A", "hardware source MAC table"); #ifdef INET6 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_clip, "A", "active CLIP table entries"); #endif SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_lb_stats, "A", "loopback statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_meminfo, "A", "memory regions"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6, "A", "MPS TCAM entries"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_path_mtus, "A", "path MTUs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_pm_stats, "A", "PM statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_rdma_stats, "A", "RDMA statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tcp_stats, "A", "TCP statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tids, "A", "TID information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_err_stats, "A", "TP error statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_la, "A", "TP logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tx_rate, "A", "Tx rate"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_ulprx_la, "A", "ULPRX logic analyzer"); if (chip_id(sc) >= CHELSIO_T5) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_wcwr_stats, "A", "write combined work requests"); } #ifdef TCP_OFFLOAD if (is_offload(sc)) { int i; char s[4]; /* * dev.t4nex.X.toe. 
*/ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, NULL, "TOE parameters"); children = SYSCTL_CHILDREN(oid); sc->tt.cong_algorithm = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm", CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control " "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, " "3 = highspeed)"); sc->tt.sndbuf = 256 * 1024; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, &sc->tt.sndbuf, 0, "max hardware send buffer size"); sc->tt.ddp = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, &sc->tt.ddp, 0, "DDP allowed"); sc->tt.rx_coalesce = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); sc->tt.tls = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tls", CTLFLAG_RW, &sc->tt.tls, 0, "Inline TLS allowed"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports, "I", "TCP ports that use inline TLS+TOE RX"); sc->tt.tx_align = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); sc->tt.tx_zcopy = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", CTLFLAG_RW, &sc->tt.tx_zcopy, 0, "Enable zero-copy aio_write(2)"); sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cop_managed_offloading", CTLFLAG_RW, &sc->tt.cop_managed_offloading, 0, "COP (Connection Offload Policy) controls all TOE offload"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", "TP timer tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", "TCP timestamp tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", "DACK tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, "IU", "DACK timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, sysctl_tp_timer, "LU", "Minimum retransmit interval (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, sysctl_tp_timer, "LU", "Maximum retransmit interval (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, sysctl_tp_timer, "LU", "Persist timer min (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, sysctl_tp_timer, "LU", "Persist timer max (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval", CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, sysctl_tp_timer, "LU", "Keepalive interval timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count", CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU", "Number of SYN retransmissions before abort"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count", 
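/*
 * [Editorial note -- a sketch of the registration pattern, not code from
 * this commit.  Each TOE timer node above reuses one read-only handler
 * and passes the TP register (rexmt min/max, persist, keepalive, ...) or
 * the field shift of interest in arg2, so the handler reduces to "read
 * arg2, scale to microseconds, hand to sysctl", roughly:
 *
 *	static int
 *	sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
 *	{
 *		struct adapter *sc = arg1;
 *		u_long v;
 *
 *		v = tp_timer_reg_to_us(sc, arg2);	hypothetical helper
 *		return (sysctl_handle_long(oidp, &v, 0, req));
 *	}
 *
 * The real sysctl_tp_timer/sysctl_tp_tick/sysctl_tp_shift_cnt bodies are
 * outside this hunk; only the registrations are from the commit.]
 */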
CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU", "Number of retransmissions before abort"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count", CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU", "Number of keepalive probes before abort"); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff", CTLFLAG_RD, NULL, "TOE retransmit backoffs"); children = SYSCTL_CHILDREN(oid); for (i = 0; i < 16; i++) { snprintf(s, sizeof(s), "%u", i); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s, CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff, "IU", "TOE retransmit backoff"); } } #endif } void vi_sysctls(struct vi_info *vi) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children; ctx = device_get_sysctl_ctx(vi->dev); /* * dev.v?(cxgbe|cxl).X. */ oid = device_get_sysctl_tree(vi->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, vi->viid, "VI identifer"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, &vi->nrxq, 0, "# of rx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, &vi->ntxq, 0, "# of tx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, &vi->first_rxq, 0, "index of first rx queue"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, &vi->first_txq, 0, "index of first tx queue"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL, vi->rss_base, "start of RSS indirection table"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, vi->rss_size, "size of RSS indirection table"); if (IS_MAIN_VI(vi)) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", "Reserve queue 0 for non-flowid packets"); } #ifdef TCP_OFFLOAD if (vi->nofldrxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, &vi->nofldrxq, 0, "# of rx queues for offloaded TCP connections"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, &vi->nofldtxq, 0, "# of tx queues for offloaded TCP connections"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", CTLFLAG_RD, &vi->first_ofld_rxq, 0, "index of first TOE rx queue"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", CTLFLAG_RD, &vi->first_ofld_txq, 0, "index of first TOE tx queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld", CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx_ofld, "I", "holdoff timer index for TOE queues"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld", CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx_ofld, "I", "holdoff packet counter index for TOE queues"); } #endif #ifdef DEV_NETMAP if (vi->nnmrxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, &vi->nnmrxq, 0, "# of netmap rx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, &vi->nnmtxq, 0, "# of netmap tx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", CTLFLAG_RD, &vi->first_nm_rxq, 0, "index of first netmap rx queue"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", CTLFLAG_RD, &vi->first_nm_txq, 0, "index of first netmap tx queue"); } #endif SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", "holdoff timer index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", "holdoff packet counter 
index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", "rx queue size"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", "tx queue size"); } static void cxgbe_sysctls(struct port_info *pi) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children, *children2; struct adapter *sc = pi->adapter; int i; char name[16]; static char *tc_flags = {"\20\1USER\2SYNC\3ASYNC\4ERR"}; ctx = device_get_sysctl_ctx(pi->dev); /* * dev.cxgbe.X. */ oid = device_get_sysctl_tree(pi->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", "PHY temperature (in Celsius)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", "PHY firmware version"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A", "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A", "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I", "autonegotiation (-1 = not supported)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, port_top_speed(pi), "max speed (in Gbps)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL, pi->mps_bg_map, "MPS buffer group map"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD, NULL, pi->rx_e_chan_map, "TP rx e-channel map"); if (sc->flags & IS_VF) return; /* * dev.(cxgbe|cxl).X.tc. */ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, "Tx scheduler traffic classes (cl_rl)"); children2 = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize", CTLFLAG_RW, &pi->sched_params->pktsize, 0, "pktsize for per-flow cl-rl (0 means up to the driver )"); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize", CTLFLAG_RW, &pi->sched_params->burstsize, 0, "burstsize for per-flow cl-rl (0 means up to the driver)"); for (i = 0; i < sc->chip_params->nsched_cls; i++) { struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; snprintf(name, sizeof(name), "%d", i); children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, "traffic class")); SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD, tc_flags, (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags"); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", CTLFLAG_RD, &tc->refcount, 0, "references to this class"); SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, sysctl_tc_params, "A", "traffic class parameters"); } /* * dev.cxgbe.X.stats. 
*/ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, NULL, "port statistics"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, &pi->tx_parse_error, 0, "# of tx packets with invalid length or # of segments"); #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ sysctl_handle_t4_reg64, "QU", desc) SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", "# of tx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", "# of tx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", "# of tx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", "# of tx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", "# of tx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", "# of tx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", "# of tx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 
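/*
 * [Editorial note -- not part of this commit, just connecting the dots.
 * SYSCTL_ADD_T4_REG64 stores the MPS port-stat register address itself in
 * arg2, so each of these counters is read live from hardware at sysctl
 * time rather than from a cached copy.  The handler, defined further
 * down as sysctl_handle_t4_reg64(), is essentially:
 *
 *	uint64_t val = t4_read_reg64(sc, reg);
 *	return (sysctl_handle_64(oidp, &val, 0, req));
 *
 * By contrast, the SYSCTL_ADD_T4_PORTSTAT counters that follow this block
 * come from the software copy in pi->stats and, as their comment notes,
 * may be stale by up to a second.]
 */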
SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", "# of frames received with bad FCS", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); SYSCTL_ADD_T4_REG64(pi, "rx_len_err", "# of frames received with length error", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", "# of rx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", "# of rx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", "# of rx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", "# of rx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", "# of rx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", "# of rx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", "# of rx frames in this range", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); #undef SYSCTL_ADD_T4_REG64 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ &pi->stats.name, desc) /* We get these from port_stats and 
they may be stale by up to 1s */ SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows"); SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows"); SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows"); SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows"); SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets"); SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets"); SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets"); SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets"); #undef SYSCTL_ADD_T4_PORTSTAT SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_records", CTLFLAG_RD, &pi->tx_tls_records, "# of TLS records transmitted"); SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_octets", CTLFLAG_RD, &pi->tx_tls_octets, "# of payload octets in transmitted TLS records"); SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_records", CTLFLAG_RD, &pi->rx_tls_records, "# of TLS records received"); SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_octets", CTLFLAG_RD, &pi->rx_tls_octets, "# of payload octets in received TLS records"); } static int sysctl_int_array(SYSCTL_HANDLER_ARGS) { int rc, *i, space = 0; struct sbuf sb; sbuf_new_for_sysctl(&sb, NULL, 64, req); for (i = arg1; arg2; arg2 -= sizeof(int), i++) { if (space) sbuf_printf(&sb, " "); sbuf_printf(&sb, "%d", *i); space = 1; } rc = sbuf_finish(&sb); sbuf_delete(&sb); return (rc); } static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS) { int rc; struct sbuf *sb; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS) { int rc; struct sbuf *sb; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_btphy(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; int op = arg2; struct adapter *sc = pi->adapter; u_int v; int rc; rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); if (rc) return (rc); /* XXX: magic numbers */ rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
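/*
 * [Editorial note -- not part of this commit.  op selects which register
 * in MMD 0x1e of the 10GBASE-T PHY is read: 0xc820 for op 0 (temperature)
 * and 0x20 for op 1 (firmware version).  The temperature word apparently
 * carries degrees Celsius in its high byte, hence the "v /= 256" for
 * op 0 below; the register numbers are, as the XXX says, magic.]
 */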
0x20 : 0xc820, &v); end_synchronized_op(sc, 0); if (rc) return (rc); if (op == 0) v /= 256; rc = sysctl_handle_int(oidp, &v, 0, req); return (rc); } static int sysctl_noflowq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; int rc, val; val = vi->rsrv_noflowq; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if ((val >= 1) && (vi->ntxq > 1)) vi->rsrv_noflowq = 1; else vi->rsrv_noflowq = 0; return (rc); } static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->pi->adapter; int idx, rc, i; struct sge_rxq *rxq; uint8_t v; idx = vi->tmr_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < 0 || idx >= SGE_NTIMERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4tmr"); if (rc) return (rc); v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); for_each_rxq(vi, i, rxq) { #ifdef atomic_store_rel_8 atomic_store_rel_8(&rxq->iq.intr_params, v); #else rxq->iq.intr_params = v; #endif } vi->tmr_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (0); } static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->pi->adapter; int idx, rc; idx = vi->pktc_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < -1 || idx >= SGE_NCOUNTERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4pktc"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->pktc_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->pi->adapter; int qsize, rc; qsize = vi->qsize_rxq; rc = sysctl_handle_int(oidp, &qsize, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (qsize < 128 || (qsize & 7)) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4rxqs"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->qsize_rxq = qsize; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->pi->adapter; int qsize, rc; qsize = vi->qsize_txq; rc = sysctl_handle_int(oidp, &qsize, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (qsize < 128 || qsize > 65536) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4txqs"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->qsize_txq = qsize; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; if (req->newptr == NULL) { struct sbuf *sb; static char *bits = "\20\1RX\2TX\3AUTO"; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok) { sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) | (lc->requested_fc & PAUSE_AUTONEG), bits); } else { sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG), bits); } rc = sbuf_finish(sb); sbuf_delete(sb); } else { char s[2]; int n; s[0] 
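/*
 * [Editorial note -- usage illustration, not part of this commit.  The
 * setting is written as a single digit whose bits are PAUSE_RX (1),
 * PAUSE_TX (2) and PAUSE_AUTONEG (4), mirroring the "\20\1RX\2TX\3AUTO"
 * read-side format, e.g.:
 *
 *	# sysctl dev.cxgbe.0.pause_settings=3	enable RX and TX pause
 *
 * A digit with any bit outside that mask (8 or 9) is rejected with
 * EINVAL below.]
 */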
= '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)); s[1] = 0; rc = sysctl_handle_string(oidp, s, sizeof(s), req); if (rc != 0) return(rc); if (s[1] != 0) return (EINVAL); if (s[0] < '0' || s[0] > '9') return (EINVAL); /* not a number */ n = s[0] - '0'; if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) return (EINVAL); /* some other bit is set too */ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4PAUSE"); if (rc) return (rc); PORT_LOCK(pi); lc->requested_fc = n; fixup_link_config(pi); if (pi->up_vis > 0) rc = apply_link_config(pi); set_current_media(pi); PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } return (rc); } static int sysctl_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; int8_t old; if (req->newptr == NULL) { struct sbuf *sb; static char *bits = "\20\1RS\2BASE-R\3RSVD1\4RSVD2\5RSVD3\6AUTO"; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); /* * Display the requested_fec when the link is down -- the actual * FEC makes sense only when the link is up. */ if (lc->link_ok) { sbuf_printf(sb, "%b", (lc->fec & M_FW_PORT_CAP32_FEC) | (lc->requested_fec & FEC_AUTO), bits); } else { sbuf_printf(sb, "%b", lc->requested_fec, bits); } rc = sbuf_finish(sb); sbuf_delete(sb); } else { char s[3]; int n; snprintf(s, sizeof(s), "%d", lc->requested_fec == FEC_AUTO ? -1 : lc->requested_fec & M_FW_PORT_CAP32_FEC); rc = sysctl_handle_string(oidp, s, sizeof(s), req); if (rc != 0) return(rc); n = strtol(&s[0], NULL, 0); if (n < 0 || n & FEC_AUTO) n = FEC_AUTO; else { if (n & ~M_FW_PORT_CAP32_FEC) return (EINVAL);/* some other bit is set too */ if (!powerof2(n)) return (EINVAL);/* one bit can be set at most */ } rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4fec"); if (rc) return (rc); PORT_LOCK(pi); old = lc->requested_fec; if (n == FEC_AUTO) lc->requested_fec = FEC_AUTO; else if (n == 0) lc->requested_fec = FEC_NONE; else { if ((lc->supported | V_FW_PORT_CAP32_FEC(n)) != lc->supported) { rc = ENOTSUP; goto done; } lc->requested_fec = n; } fixup_link_config(pi); if (pi->up_vis > 0) { rc = apply_link_config(pi); if (rc != 0) { lc->requested_fec = old; if (rc == FW_EPROTO) rc = ENOTSUP; } } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } return (rc); } static int sysctl_autoneg(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc, val; if (lc->supported & FW_PORT_CAP32_ANEG) val = lc->requested_aneg == AUTONEG_DISABLE ? 
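/*
 * [Editorial note -- not part of this commit.  This node is effectively
 * tri-state: it reads -1 when the port cannot autonegotiate (no
 * FW_PORT_CAP32_ANEG in lc->supported), else 0 or 1.  On write:
 *
 *	0     ->  AUTONEG_DISABLE
 *	1     ->  AUTONEG_ENABLE
 *	other ->  AUTONEG_AUTO	(fall back to the driver default)
 *
 * and requesting ENABLE on a port without the capability fails with
 * ENOTSUP.]
 */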
0 : 1; else val = -1; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val == 0) val = AUTONEG_DISABLE; else if (val == 1) val = AUTONEG_ENABLE; else val = AUTONEG_AUTO; rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4aneg"); if (rc) return (rc); PORT_LOCK(pi); if (val == AUTONEG_ENABLE && !(lc->supported & FW_PORT_CAP32_ANEG)) { rc = ENOTSUP; goto done; } lc->requested_aneg = val; fixup_link_config(pi); if (pi->up_vis > 0) rc = apply_link_config(pi); set_current_media(pi); done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); return (rc); } static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int reg = arg2; uint64_t val; val = t4_read_reg64(sc, reg); return (sysctl_handle_64(oidp, &val, 0, req)); } static int sysctl_temperature(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, t; uint32_t param, val; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); if (rc) return (rc); param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); end_synchronized_op(sc, 0); if (rc) return (rc); /* unknown is returned as 0 but we display -1 in that case */ t = val == 0 ? -1 : val; rc = sysctl_handle_int(oidp, &t, 0, req); return (rc); } static int sysctl_loadavg(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t param, val; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg"); if (rc) return (rc); param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); end_synchronized_op(sc, 0); if (rc) return (rc); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); if (val == 0xffffffff) { /* Only debug and custom firmwares report load averages.
*/ sbuf_printf(sb, "not available"); } else { sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cctrl(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint16_t incr[NMTUS][NCCTRL_WIN]; static const char *dec_fac[] = { "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", "0.9375" }; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); t4_read_cong_tbl(sc, incr); for (i = 0; i < NCCTRL_WIN; ++i) { sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], incr[5][i], incr[6][i], incr[7][i]); sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", incr[8][i], incr[9][i], incr[10][i], incr[11][i], incr[12][i], incr[13][i], incr[14][i], incr[15][i], sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ }; static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, n, qid = arg2; uint32_t *buf, *p; char *qtype; u_int cim_num_obq = sc->chip_params->cim_num_obq; KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, ("%s: bad qid %d\n", __func__, qid)); if (qid < CIM_NUM_IBQ) { /* inbound queue */ qtype = "IBQ"; n = 4 * CIM_IBQ_SIZE; buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); rc = t4_read_cim_ibq(sc, qid, buf, n); } else { /* outbound queue */ qtype = "OBQ"; qid -= CIM_NUM_IBQ; n = 4 * cim_num_obq * CIM_OBQ_SIZE; buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); rc = t4_read_cim_obq(sc, qid, buf, n); } if (rc < 0) { rc = -rc; goto done; } n = rc * sizeof(uint32_t); /* rc has # of words actually read */ rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) goto done; sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); if (sb == NULL) { rc = ENOMEM; goto done; } sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); for (i = 0, p = buf; i < n; i += 16, p += 4) sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], p[2], p[3]); rc = sbuf_finish(sb); sbuf_delete(sb); done: free(buf, M_CXGBE); return (rc); } static void sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) { uint32_t *p; sbuf_printf(sb, "Status Data PC%s", cfg & F_UPDBGLACAPTPCONLY ? "" : " LS0Stat LS0Addr LS0Data"); for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { if (cfg & F_UPDBGLACAPTPCONLY) { sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, p[6], p[7]); sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, p[4] & 0xff, p[5] >> 8); sbuf_printf(sb, "\n %02x %x%07x %x%07x", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4); } else { sbuf_printf(sb, "\n %02x %x%07x %x%07x %08x %08x " "%08x%08x%08x%08x", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], p[6], p[7]); } } } static void sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) { uint32_t *p; sbuf_printf(sb, "Status Inst Data PC%s", cfg & F_UPDBGLACAPTPCONLY ? 
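/*
 * [Editorial note -- not part of this commit.  The two decoders differ
 * only in the captured record width: T4/T5 log 8 32-bit words per CIM LA
 * entry (sbuf_cim_la4 above steps p += 8) while T6 logs 10 per entry
 * (sbuf_cim_la6, which begins here, steps p += 10), and sbuf_cim_la()
 * dispatches on chip_id(sc) < CHELSIO_T6.  In both, F_UPDBGLACAPTPCONLY
 * selects the compact PC-only capture layout.]
 */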
"" : " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { if (cfg & F_UPDBGLACAPTPCONLY) { sbuf_printf(sb, "\n %02x %08x %08x %08x", p[3] & 0xff, p[2], p[1], p[0]); sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, p[6] >> 16); } else { sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " "%08x %08x %08x %08x %08x %08x", (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, p[6] >> 16, p[2], p[1], p[0], p[5], p[4], p[3]); } } } static int sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags) { uint32_t cfg, *buf; int rc; rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); if (rc != 0) return (rc); MPASS(flags == M_WAITOK || flags == M_NOWAIT); buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, M_ZERO | flags); if (buf == NULL) return (ENOMEM); rc = -t4_cim_read_la(sc, buf, NULL); if (rc != 0) goto done; if (chip_id(sc) < CHELSIO_T6) sbuf_cim_la4(sc, sb, buf, cfg); else sbuf_cim_la6(sc, sb, buf, cfg); done: free(buf, M_CXGBE); return (rc); } static int sysctl_cim_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = sbuf_cim_la(sc, sb, M_WAITOK); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } bool t4_os_dump_cimla(struct adapter *sc, int arg, bool verbose) { struct sbuf sb; int rc; if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) return (false); rc = sbuf_cim_la(sc, &sb, M_NOWAIT); if (rc == 0) { rc = sbuf_finish(&sb); if (rc == 0) { log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s", device_get_nameunit(sc->dev), sbuf_data(&sb)); } } sbuf_delete(&sb); return (false); } static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int i; struct sbuf *sb; uint32_t *buf, *p; int rc; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); p = buf; for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], p[1], p[0]); } sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, (p[1] >> 2) | ((p[2] & 3) << 30), (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, p[0] & 1); } rc = sbuf_finish(sb); sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int i; struct sbuf *sb; uint32_t *buf, *p; int rc; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); p = buf; sbuf_printf(sb, "Cntl ID DataBE Addr Data"); for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { sbuf_printf(sb, "\n %02x %02x %04x 
%08x %08x%08x%08x%08x", (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]); } sbuf_printf(sb, "\n\nCntl ID Data"); for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); } rc = sbuf_finish(sb); sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; uint16_t thres[CIM_NUM_IBQ]; uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; cim_num_obq = sc->chip_params->cim_num_obq; if (is_t4(sc)) { ibq_rdaddr = A_UP_IBQ_0_RDADDR; obq_rdaddr = A_UP_OBQ_0_REALADDR; } else { ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; } nq = CIM_NUM_IBQ + cim_num_obq; rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); if (rc == 0) rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); if (rc != 0) return (rc); t4_read_cimq_cfg(sc, base, size, thres); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), G_QUEREMFLITS(p[2]) * 16); for ( ; i < nq; i++, p += 4, wr += 2) sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), G_QUEREMFLITS(p[2]) * 16); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_cpl_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); t4_tp_get_cpl_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3"); sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", stats.req[0], stats.req[1], stats.req[2], stats.req[3]); sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); } else { sbuf_printf(sb, " channel 0 channel 1"); sbuf_printf(sb, "\nCPL requests: %10u %10u", stats.req[0], stats.req[1]); sbuf_printf(sb, "\nCPL responses: %10u %10u", stats.rsp[0], stats.rsp[1]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_usm_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); t4_get_usm_stats(sc, &stats, 1); sbuf_printf(sb, "Frames: %u\n", stats.frames); sbuf_printf(sb, "Octets: %ju\n", stats.octets); sbuf_printf(sb, "Drops: %u", stats.drops); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static const char * const devlog_level_strings[] = { [FW_DEVLOG_LEVEL_EMERG] = "EMERG", [FW_DEVLOG_LEVEL_CRIT] = "CRIT", [FW_DEVLOG_LEVEL_ERR] = "ERR", 
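/*
 * [Editorial note -- not part of this commit.  The firmware device log is
 * a fixed-size ring in adapter memory.  sbuf_devlog() below copies the
 * whole ring in through memory window 1, byte-swaps every fw_devlog_e,
 * finds the entry with the smallest timestamp (the oldest record), and
 * then walks the ring exactly once from there:
 *
 *	i = first;
 *	do {
 *		print buf[i];
 *		if (++i == nentries)
 *			i = 0;
 *	} while (i != first);
 * ]
 */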
[FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", [FW_DEVLOG_LEVEL_INFO] = "INFO", [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" }; static const char * const devlog_facility_strings[] = { [FW_DEVLOG_FACILITY_CORE] = "CORE", [FW_DEVLOG_FACILITY_CF] = "CF", [FW_DEVLOG_FACILITY_SCHED] = "SCHED", [FW_DEVLOG_FACILITY_TIMER] = "TIMER", [FW_DEVLOG_FACILITY_RES] = "RES", [FW_DEVLOG_FACILITY_HW] = "HW", [FW_DEVLOG_FACILITY_FLR] = "FLR", [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", [FW_DEVLOG_FACILITY_PHY] = "PHY", [FW_DEVLOG_FACILITY_MAC] = "MAC", [FW_DEVLOG_FACILITY_PORT] = "PORT", [FW_DEVLOG_FACILITY_VI] = "VI", [FW_DEVLOG_FACILITY_FILTER] = "FILTER", [FW_DEVLOG_FACILITY_ACL] = "ACL", [FW_DEVLOG_FACILITY_TM] = "TM", [FW_DEVLOG_FACILITY_QFC] = "QFC", [FW_DEVLOG_FACILITY_DCB] = "DCB", [FW_DEVLOG_FACILITY_ETH] = "ETH", [FW_DEVLOG_FACILITY_OFLD] = "OFLD", [FW_DEVLOG_FACILITY_RI] = "RI", [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", [FW_DEVLOG_FACILITY_FCOE] = "FCOE", [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", [FW_DEVLOG_FACILITY_CHNET] = "CHNET", }; static int sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags) { int i, j, rc, nentries, first = 0; struct devlog_params *dparams = &sc->params.devlog; struct fw_devlog_e *buf, *e; uint64_t ftstamp = UINT64_MAX; if (dparams->addr == 0) return (ENXIO); MPASS(flags == M_WAITOK || flags == M_NOWAIT); buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags); if (buf == NULL) return (ENOMEM); rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); if (rc != 0) goto done; nentries = dparams->size / sizeof(struct fw_devlog_e); for (i = 0; i < nentries; i++) { e = &buf[i]; if (e->timestamp == 0) break; /* end */ e->timestamp = be64toh(e->timestamp); e->seqno = be32toh(e->seqno); for (j = 0; j < 8; j++) e->params[j] = be32toh(e->params[j]); if (e->timestamp < ftstamp) { ftstamp = e->timestamp; first = i; } } if (buf[first].timestamp == 0) goto done; /* nothing in the log */ sbuf_printf(sb, "%10s %15s %8s %8s %s\n", "Seq#", "Tstamp", "Level", "Facility", "Message"); i = first; do { e = &buf[i]; if (e->timestamp == 0) break; /* end */ sbuf_printf(sb, "%10d %15ju %8s %8s ", e->seqno, e->timestamp, (e->level < nitems(devlog_level_strings) ? devlog_level_strings[e->level] : "UNKNOWN"), (e->facility < nitems(devlog_facility_strings) ? 
devlog_facility_strings[e->facility] : "UNKNOWN")); sbuf_printf(sb, e->fmt, e->params[0], e->params[1], e->params[2], e->params[3], e->params[4], e->params[5], e->params[6], e->params[7]); if (++i == nentries) i = 0; } while (i != first); done: free(buf, M_CXGBE); return (rc); } static int sysctl_devlog(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; struct sbuf *sb; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = sbuf_devlog(sc, sb, M_WAITOK); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } void t4_os_dump_devlog(struct adapter *sc) { int rc; struct sbuf sb; if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) return; rc = sbuf_devlog(sc, &sb, M_NOWAIT); if (rc == 0) { rc = sbuf_finish(&sb); if (rc == 0) { log(LOG_DEBUG, "%s: device log follows.\n%s", device_get_nameunit(sc->dev), sbuf_data(&sb)); } } sbuf_delete(&sb); } static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_fcoe_stats stats[MAX_NCHAN]; int i, nchan = sc->chip_params->nchan; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); for (i = 0; i < nchan; i++) t4_get_fcoe_stats(sc, i, &stats[i], 1); if (nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3"); sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", stats[0].octets_ddp, stats[1].octets_ddp, stats[2].octets_ddp, stats[3].octets_ddp); sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", stats[0].frames_ddp, stats[1].frames_ddp, stats[2].frames_ddp, stats[3].frames_ddp); sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", stats[0].frames_drop, stats[1].frames_drop, stats[2].frames_drop, stats[3].frames_drop); } else { sbuf_printf(sb, " channel 0 channel 1"); sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", stats[0].octets_ddp, stats[1].octets_ddp); sbuf_printf(sb, "\nframesDDP: %16u %16u", stats[0].frames_ddp, stats[1].frames_ddp); sbuf_printf(sb, "\nframesDrop: %16u %16u", stats[0].frames_drop, stats[1].frames_drop); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; unsigned int map, kbps, ipg, mode; unsigned int pace_tab[NTX_SCHED]; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); t4_read_pace_tbl(sc, pace_tab); sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " "Class IPG (0.1 ns) Flow IPG (us)"); for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { t4_get_tx_sched(sc, i, &kbps, &ipg, 1); sbuf_printf(sb, "\n %u %-5s %u ", i, (mode & (1 << i)) ? 
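/*
 * [Editorial note -- not part of this commit.  A_TP_TX_MOD_QUEUE_REQ_MAP
 * packs one 2-bit channel number per TX scheduler, which is why the loop
 * prints "map & 3" and shifts map right by two on every iteration; the
 * per-scheduler TIMERMODE bit chooses between class-based and flow-based
 * pacing, shown as "class" or "flow" in the output.]
 */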
"flow" : "class", map & 3); if (kbps) sbuf_printf(sb, "%9u ", kbps); else sbuf_printf(sb, " disabled "); if (ipg) sbuf_printf(sb, "%13u ", ipg); else sbuf_printf(sb, " disabled "); if (pace_tab[i]) sbuf_printf(sb, "%10u", pace_tab[i]); else sbuf_printf(sb, " disabled"); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, j; uint64_t *p0, *p1; struct lb_port_stats s[2]; static const char *stat_name[] = { "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", "Frames128To255:", "Frames256To511:", "Frames512To1023:", "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", "BG2FramesTrunc:", "BG3FramesTrunc:" }; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); memset(s, 0, sizeof(s)); for (i = 0; i < sc->chip_params->nchan; i += 2) { t4_get_lb_stats(sc, i, &s[0]); t4_get_lb_stats(sc, i + 1, &s[1]); p0 = &s[0].octets; p1 = &s[1].octets; sbuf_printf(sb, "%s Loopback %u" " Loopback %u", i == 0 ? "" : "\n", i, i + 1); for (j = 0; j < nitems(stat_name); j++) sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], *p0++, *p1++); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) { int rc = 0; struct port_info *pi = arg1; struct link_config *lc = &pi->link_cfg; struct sbuf *sb; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok || lc->link_down_rc == 255) sbuf_printf(sb, "n/a"); else sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } struct mem_desc { unsigned int base; unsigned int limit; unsigned int idx; }; static int mem_desc_cmp(const void *a, const void *b) { return ((const struct mem_desc *)a)->base - ((const struct mem_desc *)b)->base; } static void mem_region_show(struct sbuf *sb, const char *name, unsigned int from, unsigned int to) { unsigned int size; if (from == to) return; size = to - from + 1; if (size == 0) return; /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); } static int sysctl_meminfo(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, n; uint32_t lo, hi, used, alloc; static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; static const char *region[] = { "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", "TDDP region:", "TPT region:", "STAG region:", "RQ region:", "RQUDP region:", "PBL region:", "TXPBL region:", "DBVFIFO region:", "ULPRX state:", "ULPTX state:", "On-chip queues:", "TLS keys:", }; struct mem_desc avail[4]; struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ struct mem_desc *md = mem; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); for (i = 0; i < nitems(mem); i++) { mem[i].limit = 0; mem[i].idx = i; } /* Find and sort the populated memory ranges */ i = 0; lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); if 
(lo & F_EDRAM0_ENABLE) { hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); avail[i].base = G_EDRAM0_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); avail[i].idx = 0; i++; } if (lo & F_EDRAM1_ENABLE) { hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); avail[i].base = G_EDRAM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); avail[i].idx = 1; i++; } if (lo & F_EXT_MEM_ENABLE) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); avail[i].base = G_EXT_MEM_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20); avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */ i++; } if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); avail[i].base = G_EXT_MEM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20); avail[i].idx = 4; i++; } if (!i) /* no memory available */ return 0; qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); /* the next few have explicit upper bounds */ md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); md->limit = md->base - 1 + t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); md++; md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); md->limit = md->base - 1 + t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); md++; if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { if (chip_id(sc) <= CHELSIO_T5) md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); else md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); md->limit = 0; } else { md->base = 0; md->idx = nitems(region); /* hide it */ } md++; #define ulp_region(reg) \ md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) ulp_region(RX_ISCSI); ulp_region(RX_TDDP); ulp_region(TX_TPT); ulp_region(RX_STAG); ulp_region(RX_RQ); ulp_region(RX_RQUDP); ulp_region(RX_PBL); ulp_region(TX_PBL); #undef ulp_region md->base = 0; md->idx = nitems(region); if (!is_t4(sc)) { uint32_t size = 0; uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); if (is_t5(sc)) { if (sge_ctrl & F_VFIFO_ENABLE) size = G_DBVFIFO_SIZE(fifo_size); } else size = G_T6_DBVFIFO_SIZE(fifo_size); if (size) { md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR)); md->limit = md->base + (size << 2) - 1; } } md++; md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); md->limit = 0; md++; md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); md->limit = 0; md++; md->base = sc->vres.ocq.start; if (sc->vres.ocq.size) md->limit = md->base + sc->vres.ocq.size - 1; else md->idx = nitems(region); /* hide it */ md++; md->base = sc->vres.key.start; if (sc->vres.key.size) md->limit = md->base + sc->vres.key.size - 1; else md->idx = nitems(region); /* hide it */ md++; /* add any address-space holes, there can be up to 3 */ for (n = 0; n < i - 1; n++) if (avail[n].limit < avail[n + 1].base) (md++)->base = avail[n].limit; if (avail[n].limit) (md++)->base = avail[n].limit; n = md - mem; qsort(mem, n, 
sizeof(struct mem_desc), mem_desc_cmp); for (lo = 0; lo < i; lo++) mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, avail[lo].limit - 1); sbuf_printf(sb, "\n"); for (i = 0; i < n; i++) { if (mem[i].idx >= nitems(region)) continue; /* skip holes */ if (!mem[i].limit) mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; mem_region_show(sb, region[mem[i].idx], mem[i].base, mem[i].limit); } sbuf_printf(sb, "\n"); lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; mem_region_show(sb, "uP RAM:", lo, hi); lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; mem_region_show(sb, "uP Extmem2:", lo, hi); lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", G_PMRXMAXPAGE(lo), t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, (lo & F_PMRXNUMCHN) ? 2 : 1); lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", G_PMTXMAXPAGE(lo), hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); sbuf_printf(sb, "%u p-structs\n", t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); for (i = 0; i < 4; i++) { if (chip_id(sc) > CHELSIO_T5) lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); if (is_t5(sc)) { used = G_T5_USED(lo); alloc = G_T5_ALLOC(lo); } else { used = G_USED(lo); alloc = G_ALLOC(lo); } /* For T6 these are MAC buffer groups */ sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", i, used, alloc); } for (i = 0; i < sc->chip_params->nchan; i++) { if (chip_id(sc) > CHELSIO_T5) lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); if (is_t5(sc)) { used = G_T5_USED(lo); alloc = G_T5_ALLOC(lo); } else { used = G_USED(lo); alloc = G_ALLOC(lo); } /* For T6 these are MAC buffer groups */ sbuf_printf(sb, "\nLoopback %d using %u pages out of %u allocated", i, used, alloc); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static inline void tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) { *mask = x | y; y = htobe64(y); memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); } static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; MPASS(chip_id(sc) <= CHELSIO_T5); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "Idx Ethernet address Mask Vld Ports PF" " VF Replication P0 P1 P2 P3 ML"); for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { uint64_t tcamx, tcamy, mask; uint32_t cls_lo, cls_hi; uint8_t addr[ETHER_ADDR_LEN]; tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); if (tcamx & tcamy) continue; tcamxy2valmask(tcamx, tcamy, addr, &mask); cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask, (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', G_PORTMAP(cls_hi), G_PF(cls_lo), (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); if (cls_lo & F_REPLICATE) { struct fw_ldst_cmd ldst_cmd; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mps"); if (rc) break; rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); end_synchronized_op(sc, 0); if (rc != 0) { sbuf_printf(sb, "%36d", rc); rc = 0; } else { sbuf_printf(sb, " %08x %08x %08x %08x", be32toh(ldst_cmd.u.mps.rplc.rplc127_96), be32toh(ldst_cmd.u.mps.rplc.rplc95_64), be32toh(ldst_cmd.u.mps.rplc.rplc63_32), be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); } } else sbuf_printf(sb, "%36s", ""); sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); } if (rc) (void) sbuf_finish(sb); else rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; MPASS(chip_id(sc) > CHELSIO_T5); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" " Replication" " P0 P1 P2 P3 ML\n"); for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { uint8_t dip_hit, vlan_vld, lookup_type, port_num; uint16_t ivlan; uint64_t tcamx, tcamy, val, mask; uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; uint8_t addr[ETHER_ADDR_LEN]; ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); if (i < 256) ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); else ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); tcamy = G_DMACH(val) << 32; tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); lookup_type = G_DATALKPTYPE(data2); port_num = G_DATAPORTNUM(data2); if (lookup_type && lookup_type != M_DATALKPTYPE) { /* Inner header VNI */ vniy = ((data2 & F_DATAVIDH2) << 23) | (G_DATAVIDH1(data2) << 16) | G_VIDL(val); dip_hit = data2 & F_DATADIPHIT; vlan_vld = 0; } else { vniy = 0; dip_hit = 0; vlan_vld = data2 & F_DATAVIDH2; ivlan = G_VIDL(val); } ctl |= V_CTLXYBITSEL(1); t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); tcamx = G_DMACH(val) << 32; tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); if (lookup_type && lookup_type != M_DATALKPTYPE) { /* Inner header VNI mask */ vnix = ((data2 & F_DATAVIDH2) << 23) | (G_DATAVIDH1(data2) << 16) | G_VIDL(val); } else vnix = 0; if (tcamx & tcamy) continue; tcamxy2valmask(tcamx, tcamy, addr, &mask); cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); if (lookup_type && lookup_type != M_DATALKPTYPE) { sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012jx %06x %06x - - %3c" " 'I' %4x %3c %#x%4u%4d", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', port_num, cls_lo & F_T6_SRAM_VLD ? 
'Y' : 'N', G_PORTMAP(cls_hi), G_T6_PF(cls_lo), cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); } else { sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012jx - - ", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask); if (vlan_vld) sbuf_printf(sb, "%4u Y ", ivlan); else sbuf_printf(sb, " - N "); sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", lookup_type ? 'I' : 'O', port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', G_PORTMAP(cls_hi), G_T6_PF(cls_lo), cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); } if (cls_lo & F_T6_REPLICATE) { struct fw_ldst_cmd ldst_cmd; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t6mps"); if (rc) break; rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); end_synchronized_op(sc, 0); if (rc != 0) { sbuf_printf(sb, "%72d", rc); rc = 0; } else { sbuf_printf(sb, " %08x %08x %08x %08x" " %08x %08x %08x %08x", be32toh(ldst_cmd.u.mps.rplc.rplc255_224), be32toh(ldst_cmd.u.mps.rplc.rplc223_192), be32toh(ldst_cmd.u.mps.rplc.rplc191_160), be32toh(ldst_cmd.u.mps.rplc.rplc159_128), be32toh(ldst_cmd.u.mps.rplc.rplc127_96), be32toh(ldst_cmd.u.mps.rplc.rplc95_64), be32toh(ldst_cmd.u.mps.rplc.rplc63_32), be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); } } else sbuf_printf(sb, "%72s", ""); sbuf_printf(sb, "%4u%3u%3u%3u %#x", G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), (cls_lo >> S_T6_MULTILISTEN0) & 0xf); } if (rc) (void) sbuf_finish(sb); else rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint16_t mtus[NMTUS]; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); t4_read_mtu_tbl(sc, mtus, NULL); sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], mtus[14], mtus[15]); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; static const char *tx_stats[MAX_PM_NSTATS] = { "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", "Tx FIFO wait", NULL, "Tx latency" }; static const char *rx_stats[MAX_PM_NSTATS] = { "Read:", "Write bypass:", "Write mem:", "Flush:", "Rx FIFO wait", NULL, "Rx latency" }; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); sbuf_printf(sb, " Tx pcmds Tx bytes"); for (i = 0; i < 4; i++) { sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); } sbuf_printf(sb, "\n Rx pcmds Rx bytes"); for (i = 0; i < 4; i++) { sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); } if (chip_id(sc) > CHELSIO_T5) { sbuf_printf(sb, "\n Total wait Total occupancy"); sbuf_printf(sb, "\n%-13s %10u 
%20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); i += 2; MPASS(i < nitems(tx_stats)); sbuf_printf(sb, "\n Reads Total wait"); sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_rdma_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); t4_tp_get_rdma_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tcp_stats v4, v6; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); t4_tp_get_tcp_stats(sc, &v4, &v6, 0); mtx_unlock(&sc->reg_lock); sbuf_printf(sb, " IP IPv6\n"); sbuf_printf(sb, "OutRsts: %20u %20u\n", v4.tcp_out_rsts, v6.tcp_out_rsts); sbuf_printf(sb, "InSegs: %20ju %20ju\n", v4.tcp_in_segs, v6.tcp_in_segs); sbuf_printf(sb, "OutSegs: %20ju %20ju\n", v4.tcp_out_segs, v6.tcp_out_segs); sbuf_printf(sb, "RetransSegs: %20ju %20ju", v4.tcp_retrans_segs, v6.tcp_retrans_segs); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tids(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tid_info *t = &sc->tids; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (t->natids) { sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, t->atids_in_use); } if (t->nhpftids) { sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n", t->hpftid_base, t->hpftid_end, t->hpftids_in_use); } if (t->ntids) { sbuf_printf(sb, "TID range: "); if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { uint32_t b, hb; if (chip_id(sc) <= CHELSIO_T5) { b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; } else { b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); } if (b) sbuf_printf(sb, "%u-%u, ", t->tid_base, b - 1); sbuf_printf(sb, "%u-%u", hb, t->ntids - 1); } else sbuf_printf(sb, "%u-%u", t->tid_base, t->ntids - 1); sbuf_printf(sb, ", in use: %u\n", atomic_load_acq_int(&t->tids_in_use)); } if (t->nstids) { sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, t->stid_base + t->nstids - 1, t->stids_in_use); } if (t->nftids) { sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base, t->ftid_end, t->ftids_in_use); } if (t->netids) { sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, t->etid_base + t->netids - 1); } sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_err_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = 
sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); t4_tp_get_err_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", stats.mac_in_errs[0], stats.mac_in_errs[1], stats.mac_in_errs[2], stats.mac_in_errs[3]); sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", stats.hdr_in_errs[0], stats.hdr_in_errs[1], stats.hdr_in_errs[2], stats.hdr_in_errs[3]); sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", stats.tcp_in_errs[0], stats.tcp_in_errs[1], stats.tcp_in_errs[2], stats.tcp_in_errs[3]); sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "macInErrs: %10u %10u\n", stats.mac_in_errs[0], stats.mac_in_errs[1]); sbuf_printf(sb, "hdrInErrs: %10u %10u\n", stats.hdr_in_errs[0], stats.hdr_in_errs[1]); sbuf_printf(sb, "tcpInErrs: %10u %10u\n", stats.tcp_in_errs[0], stats.tcp_in_errs[1]); sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); } sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", stats.ofld_no_neigh, stats.ofld_cong_defer); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct tp_params *tpp = &sc->params.tp; u_int mask; int rc; mask = tpp->la_mask >> 16; rc = sysctl_handle_int(oidp, &mask, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (mask > 0xffff) return (EINVAL); tpp->la_mask = mask << 16; t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); return (0); } struct field_desc { const char *name; u_int start; u_int width; }; static void field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) { char buf[32]; int line_size = 0; while (f->name) { uint64_t mask = (1ULL << f->width) - 1; int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, ((uintmax_t)v >> f->start) & mask); if (line_size + len >= 79) { line_size = 8; sbuf_printf(sb, "\n "); } sbuf_printf(sb, "%s ", buf); line_size += len + 1; f++; } sbuf_printf(sb, "\n"); } static const struct field_desc tp_la0[] = { { "RcfOpCodeOut", 60, 4 }, { "State", 56, 4 }, { "WcfState", 52, 4 }, { "RcfOpcSrcOut", 50, 2 }, { "CRxError", 49, 1 }, { "ERxError", 48, 1 }, { "SanityFailed", 47, 1 }, { "SpuriousMsg", 46, 1 }, { 
"FlushInputMsg", 45, 1 }, { "FlushInputCpl", 44, 1 }, { "RssUpBit", 43, 1 }, { "RssFilterHit", 42, 1 }, { "Tid", 32, 10 }, { "InitTcb", 31, 1 }, { "LineNumber", 24, 7 }, { "Emsg", 23, 1 }, { "EdataOut", 22, 1 }, { "Cmsg", 21, 1 }, { "CdataOut", 20, 1 }, { "EreadPdu", 19, 1 }, { "CreadPdu", 18, 1 }, { "TunnelPkt", 17, 1 }, { "RcfPeerFin", 16, 1 }, { "RcfReasonOut", 12, 4 }, { "TxCchannel", 10, 2 }, { "RcfTxChannel", 8, 2 }, { "RxEchannel", 6, 2 }, { "RcfRxChannel", 5, 1 }, { "RcfDataOutSrdy", 4, 1 }, { "RxDvld", 3, 1 }, { "RxOoDvld", 2, 1 }, { "RxCongestion", 1, 1 }, { "TxCongestion", 0, 1 }, { NULL } }; static const struct field_desc tp_la1[] = { { "CplCmdIn", 56, 8 }, { "CplCmdOut", 48, 8 }, { "ESynOut", 47, 1 }, { "EAckOut", 46, 1 }, { "EFinOut", 45, 1 }, { "ERstOut", 44, 1 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static const struct field_desc tp_la2[] = { { "CplCmdIn", 56, 8 }, { "MpsVfVld", 55, 1 }, { "MpsPf", 52, 3 }, { "MpsVf", 44, 8 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static void tp_la_show(struct sbuf *sb, uint64_t *p, int idx) { field_desc_show(sb, *p, tp_la0); } static void tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) { if (idx) sbuf_printf(sb, "\n"); field_desc_show(sb, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(sb, p[1], tp_la0); } static void tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) { if (idx) sbuf_printf(sb, "\n"); field_desc_show(sb, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); } static int sysctl_tp_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; uint64_t *buf, *p; int rc; u_int i, inc; void (*show_func)(struct sbuf *, uint64_t *, int); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); t4_tp_read_la(sc, buf, NULL); p = buf; switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { case 2: inc = 2; show_func = tp_la_show2; break; case 3: inc = 2; show_func = tp_la_show3; break; default: inc = 1; show_func = tp_la_show; } for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) (*show_func)(sb, p, i); rc = sbuf_finish(sb); sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); t4_get_chan_txrate(sc, nrate, orate); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", nrate[0], nrate[1], nrate[2], nrate[3]); sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", orate[0], orate[1], orate[2], orate[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", nrate[0], nrate[1]); sbuf_printf(sb, "Offload B/s: %10ju %10ju", orate[0], orate[1]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; uint32_t *buf, *p; int rc, i; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); t4_ulprx_read_la(sc, buf); p = buf; sbuf_printf(sb, " Pcmd Type Message" " Data"); for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); } rc = sbuf_finish(sb); sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, v; MPASS(chip_id(sc) >= CHELSIO_T5); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); v = t4_read_reg(sc, A_SGE_STAT_CFG); if (G_STATSOURCE_T5(v) == 7) { int mode; mode = is_t5(sc) ? 
G_STATMODE(v) : G_T6_STATMODE(v); if (mode == 0) { sbuf_printf(sb, "total %d, incomplete %d", t4_read_reg(sc, A_SGE_STAT_TOTAL), t4_read_reg(sc, A_SGE_STAT_MATCH)); } else if (mode == 1) { sbuf_printf(sb, "total %d, data overflow %d", t4_read_reg(sc, A_SGE_STAT_TOTAL), t4_read_reg(sc, A_SGE_STAT_MATCH)); } else { sbuf_printf(sb, "unknown mode %d", mode); } } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cpus(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; enum cpu_sets op = arg2; cpuset_t cpuset; struct sbuf *sb; int i, rc; MPASS(op == LOCAL_CPUS || op == INTR_CPUS); CPU_ZERO(&cpuset); rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset); if (rc != 0) return (rc); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); CPU_FOREACH(i) sbuf_printf(sb, "%d ", i); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } #ifdef TCP_OFFLOAD static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int *old_ports, *new_ports; int i, new_count, rc; if (req->newptr == NULL && req->oldptr == NULL) return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) * sizeof(sc->tt.tls_rx_ports[0]))); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx"); if (rc) return (rc); if (sc->tt.num_tls_rx_ports == 0) { i = -1; rc = SYSCTL_OUT(req, &i, sizeof(i)); } else rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports, sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0])); if (rc == 0 && req->newptr != NULL) { new_count = req->newlen / sizeof(new_ports[0]); new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE, M_WAITOK); rc = SYSCTL_IN(req, new_ports, new_count * sizeof(new_ports[0])); if (rc) goto err; /* Allow setting to a single '-1' to clear the list. 
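Any other write must contain only ports in the range 1 to IPPORT_MAX, and the new list replaces the old one in its entirety under the adapter lock.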
*/ if (new_count == 1 && new_ports[0] == -1) { ADAPTER_LOCK(sc); old_ports = sc->tt.tls_rx_ports; sc->tt.tls_rx_ports = NULL; sc->tt.num_tls_rx_ports = 0; ADAPTER_UNLOCK(sc); free(old_ports, M_CXGBE); } else { for (i = 0; i < new_count; i++) { if (new_ports[i] < 1 || new_ports[i] > IPPORT_MAX) { rc = EINVAL; goto err; } } ADAPTER_LOCK(sc); old_ports = sc->tt.tls_rx_ports; sc->tt.tls_rx_ports = new_ports; sc->tt.num_tls_rx_ports = new_count; ADAPTER_UNLOCK(sc); free(old_ports, M_CXGBE); new_ports = NULL; } err: free(new_ports, M_CXGBE); } end_synchronized_op(sc, 0); return (rc); } static void unit_conv(char *buf, size_t len, u_int val, u_int factor) { u_int rem = val % factor; if (rem == 0) snprintf(buf, len, "%u", val / factor); else { while (rem % 10 == 0) rem /= 10; snprintf(buf, len, "%u.%u", val / factor, rem); } } static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; char buf[16]; u_int res, re; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); switch (arg2) { case 0: /* timer_tick */ re = G_TIMERRESOLUTION(res); break; case 1: /* TCP timestamp tick */ re = G_TIMESTAMPRESOLUTION(res); break; case 2: /* DACK tick */ re = G_DELAYEDACKRESOLUTION(res); break; default: return (EDOOFUS); } unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int res, dack_re, v; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); dack_re = G_DELAYEDACKRESOLUTION(res); v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int reg = arg2; u_int tre; u_long tp_tick_us, v; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); tp_tick_us = (cclk_ps << tre) / 1000000; if (reg == A_TP_INIT_SRTT) v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); else v = tp_tick_us * t4_read_reg(sc, reg); return (sysctl_handle_long(oidp, &v, 0, req)); } /* * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is * passed to this function. 
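* The handler below simply shifts A_TP_SHIFT_CNT right by that offset and masks with 0xf to expose the selected count.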
*/ static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int idx = arg2; u_int v; MPASS(idx >= 0 && idx <= 24); v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf; return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int idx = arg2; u_int shift, v, r; MPASS(idx >= 0 && idx < 16); r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3); shift = (idx & 3) << 3; v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0; return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->pi->adapter; int idx, rc, i; struct sge_ofld_rxq *ofld_rxq; uint8_t v; idx = vi->ofld_tmr_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < 0 || idx >= SGE_NTIMERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4otmr"); if (rc) return (rc); v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1); for_each_ofld_rxq(vi, i, ofld_rxq) { #ifdef atomic_store_rel_8 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); #else ofld_rxq->iq.intr_params = v; #endif } vi->ofld_tmr_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (0); } static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->pi->adapter; int idx, rc; idx = vi->ofld_pktc_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < -1 || idx >= SGE_NCOUNTERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4opktc"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->ofld_pktc_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (rc); } #endif static int get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) { int rc; if (cntxt->cid > M_CTXTQID) return (EINVAL); if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) return (EINVAL); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); if (rc) return (rc); if (sc->flags & FW_OK) { rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); if (rc == 0) goto done; } /* * Read via firmware failed or wasn't even attempted. Read directly via * the backdoor. */ rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); done: end_synchronized_op(sc, 0); return (rc); } static int load_fw(struct adapter *sc, struct t4_data *fw) { int rc; uint8_t *fw_data; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); if (rc) return (rc); /* * The firmware, with the sole exception of the memory parity error * handler, runs from memory and not flash. It is almost always safe to * install a new firmware on a running system. Just set bit 1 in * hw.cxgbe.dflags or dev...dflags first. 
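* (Bit 1 is DF_LOAD_FW_ANYTIME, the flag checked below.)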
*/ if (sc->flags & FULL_INIT_DONE && (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) { rc = EBUSY; goto done; } fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); if (fw_data == NULL) { rc = ENOMEM; goto done; } rc = copyin(fw->data, fw_data, fw->len); if (rc == 0) rc = -t4_load_fw(sc, fw_data, fw->len); free(fw_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_cfg(struct adapter *sc, struct t4_data *cfg) { int rc; uint8_t *cfg_data = NULL; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); if (rc) return (rc); if (cfg->len == 0) { /* clear */ rc = -t4_load_cfg(sc, NULL, 0); goto done; } cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); if (cfg_data == NULL) { rc = ENOMEM; goto done; } rc = copyin(cfg->data, cfg_data, cfg->len); if (rc == 0) rc = -t4_load_cfg(sc, cfg_data, cfg->len); free(cfg_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_boot(struct adapter *sc, struct t4_bootrom *br) { int rc; uint8_t *br_data = NULL; u_int offset; if (br->len > 1024 * 1024) return (EFBIG); if (br->pf_offset == 0) { /* pfidx */ if (br->pfidx_addr > 7) return (EINVAL); offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, A_PCIE_PF_EXPROM_OFST))); } else if (br->pf_offset == 1) { /* offset */ offset = G_OFFSET(br->pfidx_addr); } else { return (EINVAL); } rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); if (rc) return (rc); if (br->len == 0) { /* clear */ rc = -t4_load_boot(sc, NULL, offset, 0); goto done; } br_data = malloc(br->len, M_CXGBE, M_WAITOK); if (br_data == NULL) { rc = ENOMEM; goto done; } rc = copyin(br->data, br_data, br->len); if (rc == 0) rc = -t4_load_boot(sc, br_data, offset, br->len); free(br_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_bootcfg(struct adapter *sc, struct t4_data *bc) { int rc; uint8_t *bc_data = NULL; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); if (rc) return (rc); if (bc->len == 0) { /* clear */ rc = -t4_load_bootcfg(sc, NULL, 0); goto done; } bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); if (bc_data == NULL) { rc = ENOMEM; goto done; } rc = copyin(bc->data, bc_data, bc->len); if (rc == 0) rc = -t4_load_bootcfg(sc, bc_data, bc->len); free(bc_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump) { int rc; struct cudbg_init *cudbg; void *handle, *buf; /* buf is large, don't block if no memory is available */ buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); if (buf == NULL) return (ENOMEM); handle = cudbg_alloc_handle(); if (handle == NULL) { rc = ENOMEM; goto done; } cudbg = cudbg_get_init(handle); cudbg->adap = sc; cudbg->print = (cudbg_print_cb)printf; #ifndef notyet device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", __func__, dump->wr_flash, dump->len, dump->data); #endif if (dump->wr_flash) cudbg->use_flash = 1; MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); rc = cudbg_collect(handle, buf, &dump->len); if (rc != 0) goto done; rc = copyout(buf, dump->data, dump->len); done: cudbg_free_handle(handle); free(buf, M_CXGBE); return (rc); } static void free_offload_policy(struct t4_offload_policy *op) { struct offload_rule *r; int i; if (op == NULL) return; r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { free(r->bpf_prog.bf_insns, M_CXGBE); } free(op->rule, M_CXGBE); free(op, M_CXGBE); } static int set_offload_policy(struct adapter *sc, 
struct t4_offload_policy *uop) { int i, rc, len; struct t4_offload_policy *op, *old; struct bpf_program *bf; const struct offload_settings *s; struct offload_rule *r; void *u; if (!is_offload(sc)) return (ENODEV); if (uop->nrules == 0) { /* Delete installed policies. */ op = NULL; goto set_policy; } if (uop->nrules > 256) { /* arbitrary */ return (E2BIG); } /* Copy userspace offload policy to kernel */ op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK); op->nrules = uop->nrules; len = op->nrules * sizeof(struct offload_rule); op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); rc = copyin(uop->rule, op->rule, len); if (rc) { free(op->rule, M_CXGBE); free(op, M_CXGBE); return (rc); } r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { /* Validate open_type */ if (r->open_type != OPEN_TYPE_LISTEN && r->open_type != OPEN_TYPE_ACTIVE && r->open_type != OPEN_TYPE_PASSIVE && r->open_type != OPEN_TYPE_DONTCARE) { error: /* * Rules 0 to i have malloc'd filters that need to be * freed. Rules i+1 to nrules have userspace pointers * and should be left alone. */ op->nrules = i; free_offload_policy(op); return (rc); } /* Validate settings */ s = &r->settings; if ((s->offload != 0 && s->offload != 1) || s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED || s->sched_class < -1 || s->sched_class >= sc->chip_params->nsched_cls) { rc = EINVAL; goto error; } bf = &r->bpf_prog; u = bf->bf_insns; /* userspace ptr */ bf->bf_insns = NULL; if (bf->bf_len == 0) { /* legal, matches everything */ continue; } len = bf->bf_len * sizeof(*bf->bf_insns); bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); rc = copyin(u, bf->bf_insns, len); if (rc != 0) goto error; if (!bpf_validate(bf->bf_insns, bf->bf_len)) { rc = EINVAL; goto error; } } set_policy: rw_wlock(&sc->policy_lock); old = sc->policy; sc->policy = op; rw_wunlock(&sc->policy_lock); free_offload_policy(old); return (0); } #define MAX_READ_BUF_SIZE (128 * 1024) static int read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) { uint32_t addr, remaining, n; uint32_t *buf; int rc; uint8_t *dst; rc = validate_mem_range(sc, mr->addr, mr->len); if (rc != 0) return (rc); buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); addr = mr->addr; remaining = mr->len; dst = (void *)mr->data; while (remaining) { n = min(remaining, MAX_READ_BUF_SIZE); read_via_memwin(sc, 2, addr, buf, n); rc = copyout(buf, dst, n); if (rc != 0) break; dst += n; remaining -= n; addr += n; } free(buf, M_CXGBE); return (rc); } #undef MAX_READ_BUF_SIZE static int read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) { int rc; if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) return (EINVAL); if (i2cd->len > sizeof(i2cd->data)) return (EFBIG); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); if (rc) return (rc); rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, i2cd->offset, i2cd->len, &i2cd->data[0]); end_synchronized_op(sc, 0); return (rc); } int t4_os_find_pci_capability(struct adapter *sc, int cap) { int i; return (pci_find_cap(sc->dev, cap, &i) == 0 ? 
i : 0); } int t4_os_pci_save_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); return (0); } int t4_os_pci_restore_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); return (0); } void t4_os_portmod_changed(struct port_info *pi) { struct adapter *sc = pi->adapter; struct vi_info *vi; struct ifnet *ifp; static const char *mod_str[] = { NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" }; KASSERT((pi->flags & FIXED_IFMEDIA) == 0, ("%s: port_type %u", __func__, pi->port_type)); vi = &pi->vi[0]; if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) { PORT_LOCK(pi); build_medialist(pi); if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { fixup_link_config(pi); apply_link_config(pi); } PORT_UNLOCK(pi); end_synchronized_op(sc, LOCK_HELD); } ifp = vi->ifp; if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) if_printf(ifp, "transceiver unplugged.\n"); else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) if_printf(ifp, "unknown transceiver inserted.\n"); else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) if_printf(ifp, "unsupported transceiver inserted.\n"); else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { if_printf(ifp, "%dGbps %s transceiver inserted.\n", port_top_speed(pi), mod_str[pi->mod_type]); } else { if_printf(ifp, "transceiver (type %d) inserted.\n", pi->mod_type); } } void t4_os_link_changed(struct port_info *pi) { struct vi_info *vi; struct ifnet *ifp; struct link_config *lc; int v; PORT_LOCK_ASSERT_OWNED(pi); for_each_vi(pi, v, vi) { ifp = vi->ifp; if (ifp == NULL) continue; lc = &pi->link_cfg; if (lc->link_ok) { ifp->if_baudrate = IF_Mbps(lc->speed); if_link_state_change(ifp, LINK_STATE_UP); } else { if_link_state_change(ifp, LINK_STATE_DOWN); } } } void t4_iterate(void (*func)(struct adapter *, void *), void *arg) { struct adapter *sc; sx_slock(&t4_list_lock); SLIST_FOREACH(sc, &t4_list, link) { /* * func should not make any assumptions about what state sc is * in - the only guarantee is that sc->sc_lock is a valid lock. 
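* The list lock is held shared for the duration of the walk, so adapters cannot appear on or disappear from the list while func runs.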
*/ func(sc, arg); } sx_sunlock(&t4_list_lock); } static int t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, struct thread *td) { int rc; struct adapter *sc = dev->si_drv1; rc = priv_check(td, PRIV_DRIVER); if (rc != 0) return (rc); switch (cmd) { case CHELSIO_T4_GETREG: { struct t4_reg *edata = (struct t4_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); if (edata->size == 4) edata->val = t4_read_reg(sc, edata->addr); else if (edata->size == 8) edata->val = t4_read_reg64(sc, edata->addr); else return (EINVAL); break; } case CHELSIO_T4_SETREG: { struct t4_reg *edata = (struct t4_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); if (edata->size == 4) { if (edata->val & 0xffffffff00000000) return (EINVAL); t4_write_reg(sc, edata->addr, (uint32_t) edata->val); } else if (edata->size == 8) t4_write_reg64(sc, edata->addr, edata->val); else return (EINVAL); break; } case CHELSIO_T4_REGDUMP: { struct t4_regdump *regs = (struct t4_regdump *)data; int reglen = t4_get_regs_len(sc); uint8_t *buf; if (regs->len < reglen) { regs->len = reglen; /* hint to the caller */ return (ENOBUFS); } regs->len = reglen; buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); get_regs(sc, regs, buf); rc = copyout(buf, regs->data, reglen); free(buf, M_CXGBE); break; } case CHELSIO_T4_GET_FILTER_MODE: rc = get_filter_mode(sc, (uint32_t *)data); break; case CHELSIO_T4_SET_FILTER_MODE: rc = set_filter_mode(sc, *(uint32_t *)data); break; case CHELSIO_T4_GET_FILTER: rc = get_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_SET_FILTER: rc = set_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_DEL_FILTER: rc = del_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_GET_SGE_CONTEXT: rc = get_sge_context(sc, (struct t4_sge_context *)data); break; case CHELSIO_T4_LOAD_FW: rc = load_fw(sc, (struct t4_data *)data); break; case CHELSIO_T4_GET_MEM: rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); break; case CHELSIO_T4_GET_I2C: rc = read_i2c(sc, (struct t4_i2c_data *)data); break; case CHELSIO_T4_CLEAR_STATS: { int i, v, bg_map; u_int port_id = *(uint32_t *)data; struct port_info *pi; struct vi_info *vi; if (port_id >= sc->params.nports) return (EINVAL); pi = sc->port[port_id]; if (pi == NULL) return (EIO); /* MAC stats */ t4_clr_port_stats(sc, pi->tx_chan); pi->tx_parse_error = 0; pi->tnl_cong_drops = 0; mtx_lock(&sc->reg_lock); for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE) - t4_clr_vi_stats(sc, vi->viid); + t4_clr_vi_stats(sc, vi->vin); } bg_map = pi->mps_bg_map; v = 0; /* reuse */ while (bg_map) { i = ffs(bg_map) - 1; t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, A_TP_MIB_TNL_CNG_DROP_0 + i); bg_map &= ~(1 << i); } mtx_unlock(&sc->reg_lock); /* * Since this command accepts a port, clear stats for * all VIs on this port. 
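* The main VI additionally owns the port's control queue, whose work-request counters are reset here as well.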
*/ for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE) { struct sge_rxq *rxq; struct sge_txq *txq; struct sge_wrq *wrq; for_each_rxq(vi, i, rxq) { #if defined(INET) || defined(INET6) rxq->lro.lro_queued = 0; rxq->lro.lro_flushed = 0; #endif rxq->rxcsum = 0; rxq->vlan_extraction = 0; } for_each_txq(vi, i, txq) { txq->txcsum = 0; txq->tso_wrs = 0; txq->vlan_insertion = 0; txq->imm_wrs = 0; txq->sgl_wrs = 0; txq->txpkt_wrs = 0; txq->txpkts0_wrs = 0; txq->txpkts1_wrs = 0; txq->txpkts0_pkts = 0; txq->txpkts1_pkts = 0; mp_ring_reset_stats(txq->r); } #ifdef TCP_OFFLOAD /* nothing to clear for each ofld_rxq */ for_each_ofld_txq(vi, i, wrq) { wrq->tx_wrs_direct = 0; wrq->tx_wrs_copied = 0; } #endif if (IS_MAIN_VI(vi)) { wrq = &sc->sge.ctrlq[pi->port_id]; wrq->tx_wrs_direct = 0; wrq->tx_wrs_copied = 0; } } } break; } case CHELSIO_T4_SCHED_CLASS: rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); break; case CHELSIO_T4_SCHED_QUEUE: rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); break; case CHELSIO_T4_GET_TRACER: rc = t4_get_tracer(sc, (struct t4_tracer *)data); break; case CHELSIO_T4_SET_TRACER: rc = t4_set_tracer(sc, (struct t4_tracer *)data); break; case CHELSIO_T4_LOAD_CFG: rc = load_cfg(sc, (struct t4_data *)data); break; case CHELSIO_T4_LOAD_BOOT: rc = load_boot(sc, (struct t4_bootrom *)data); break; case CHELSIO_T4_LOAD_BOOTCFG: rc = load_bootcfg(sc, (struct t4_data *)data); break; case CHELSIO_T4_CUDBG_DUMP: rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data); break; case CHELSIO_T4_SET_OFLD_POLICY: rc = set_offload_policy(sc, (struct t4_offload_policy *)data); break; default: rc = ENOTTY; } return (rc); } #ifdef TCP_OFFLOAD void t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order) { t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask); t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) | V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) | V_HPZ3(pgsz_order[3])); } static int toe_capability(struct vi_info *vi, int enable) { int rc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; ASSERT_SYNCHRONIZED_OP(sc); if (!is_offload(sc)) return (ENODEV); if (enable) { if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { /* TOE is already enabled. */ return (0); } /* * We need the port's queues around so that we're able to send * and receive CPLs to/from the TOE even if the ifnet for this * port has never been UP'd administratively. */ if (!(vi->flags & VI_INIT_DONE)) { rc = vi_full_init(vi); if (rc) return (rc); } if (!(pi->vi[0].flags & VI_INIT_DONE)) { rc = vi_full_init(&pi->vi[0]); if (rc) return (rc); } if (isset(&sc->offload_map, pi->port_id)) { /* TOE is enabled on another VI of this port. */ pi->uld_vis++; return (0); } if (!uld_active(sc, ULD_TOM)) { rc = t4_activate_uld(sc, ULD_TOM); if (rc == EAGAIN) { log(LOG_WARNING, "You must kldload t4_tom.ko before trying " "to enable TOE on a cxgbe interface.\n"); } if (rc != 0) return (rc); KASSERT(sc->tom_softc != NULL, ("%s: TOM activated but softc NULL", __func__)); KASSERT(uld_active(sc, ULD_TOM), ("%s: TOM activated but flag not set", __func__)); } /* Activate iWARP and iSCSI too, if the modules are loaded. 
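Their return values are deliberately ignored; TOE remains usable even if neither module is present.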
*/ if (!uld_active(sc, ULD_IWARP)) (void) t4_activate_uld(sc, ULD_IWARP); if (!uld_active(sc, ULD_ISCSI)) (void) t4_activate_uld(sc, ULD_ISCSI); pi->uld_vis++; setbit(&sc->offload_map, pi->port_id); } else { pi->uld_vis--; if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) return (0); KASSERT(uld_active(sc, ULD_TOM), ("%s: TOM never initialized?", __func__)); clrbit(&sc->offload_map, pi->port_id); } return (0); } /* * Add an upper layer driver to the global list. */ int t4_register_uld(struct uld_info *ui) { int rc = 0; struct uld_info *u; sx_xlock(&t4_uld_list_lock); SLIST_FOREACH(u, &t4_uld_list, link) { if (u->uld_id == ui->uld_id) { rc = EEXIST; goto done; } } SLIST_INSERT_HEAD(&t4_uld_list, ui, link); ui->refcount = 0; done: sx_xunlock(&t4_uld_list_lock); return (rc); } int t4_unregister_uld(struct uld_info *ui) { int rc = EINVAL; struct uld_info *u; sx_xlock(&t4_uld_list_lock); SLIST_FOREACH(u, &t4_uld_list, link) { if (u == ui) { if (ui->refcount > 0) { rc = EBUSY; goto done; } SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); rc = 0; goto done; } } done: sx_xunlock(&t4_uld_list_lock); return (rc); } int t4_activate_uld(struct adapter *sc, int id) { int rc; struct uld_info *ui; ASSERT_SYNCHRONIZED_OP(sc); if (id < 0 || id > ULD_MAX) return (EINVAL); rc = EAGAIN; /* kldload the module with this ULD and try again. */ sx_slock(&t4_uld_list_lock); SLIST_FOREACH(ui, &t4_uld_list, link) { if (ui->uld_id == id) { if (!(sc->flags & FULL_INIT_DONE)) { rc = adapter_full_init(sc); if (rc != 0) break; } rc = ui->activate(sc); if (rc == 0) { setbit(&sc->active_ulds, id); ui->refcount++; } break; } } sx_sunlock(&t4_uld_list_lock); return (rc); } int t4_deactivate_uld(struct adapter *sc, int id) { int rc; struct uld_info *ui; ASSERT_SYNCHRONIZED_OP(sc); if (id < 0 || id > ULD_MAX) return (EINVAL); rc = ENXIO; sx_slock(&t4_uld_list_lock); SLIST_FOREACH(ui, &t4_uld_list, link) { if (ui->uld_id == id) { rc = ui->deactivate(sc); if (rc == 0) { clrbit(&sc->active_ulds, id); ui->refcount--; } break; } } sx_sunlock(&t4_uld_list_lock); return (rc); } int uld_active(struct adapter *sc, int uld_id) { MPASS(uld_id >= 0 && uld_id <= ULD_MAX); return (isset(&sc->active_ulds, uld_id)); } #endif /* * t = ptr to tunable. * nc = number of CPUs. * c = compiled-in default for that tunable. */ static void calculate_nqueues(int *t, int nc, const int c) { int nq; if (*t > 0) return; nq = *t < 0 ? -*t : c; *t = min(nc, nq); } /* * Come up with reasonable defaults for some of the tunables, provided they're * not set by the user (in which case we'll use the values as is).
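* A tunable left at zero gets the compiled-in default and a negative value requests that many queues; either way the result is capped at the number of CPUs (see calculate_nqueues above).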
*/ static void tweak_tunables(void) { int nc = mp_ncpus; /* our snapshot of the number of CPUs */ if (t4_ntxq < 1) { #ifdef RSS t4_ntxq = rss_getnumbuckets(); #else calculate_nqueues(&t4_ntxq, nc, NTXQ); #endif } calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); if (t4_nrxq < 1) { #ifdef RSS t4_nrxq = rss_getnumbuckets(); #else calculate_nqueues(&t4_nrxq, nc, NRXQ); #endif } calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); #ifdef TCP_OFFLOAD calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ); calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ); calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); if (t4_toecaps_allowed == -1) t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; if (t4_rdmacaps_allowed == -1) { t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | FW_CAPS_CONFIG_RDMA_RDMAC; } if (t4_iscsicaps_allowed == -1) { t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | FW_CAPS_CONFIG_ISCSI_TARGET_PDU | FW_CAPS_CONFIG_ISCSI_T10DIF; } if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS) t4_tmr_idx_ofld = TMR_IDX_OFLD; if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS) t4_pktc_idx_ofld = PKTC_IDX_OFLD; #else if (t4_toecaps_allowed == -1) t4_toecaps_allowed = 0; if (t4_rdmacaps_allowed == -1) t4_rdmacaps_allowed = 0; if (t4_iscsicaps_allowed == -1) t4_iscsicaps_allowed = 0; #endif #ifdef DEV_NETMAP calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); #endif if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS) t4_tmr_idx = TMR_IDX; if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS) t4_pktc_idx = PKTC_IDX; if (t4_qsize_txq < 128) t4_qsize_txq = 128; if (t4_qsize_rxq < 128) t4_qsize_rxq = 128; while (t4_qsize_rxq & 7) t4_qsize_rxq++; t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; /* * Number of VIs to create per-port. The first VI is the "main" regular * VI for the port. The rest are additional virtual interfaces on the * same physical port. Note that the main VI does not have native * netmap support but the extra VIs do. * * Limit the number of VIs per port to the number of available * MAC addresses per port. 
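* That limit is nitems(vi_mac_funcs); larger requests are clamped with a console message. (The knob is typically set as hw.cxgbe.num_vis in loader.conf.)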
*/ if (t4_num_vis < 1) t4_num_vis = 1; if (t4_num_vis > nitems(vi_mac_funcs)) { t4_num_vis = nitems(vi_mac_funcs); printf("cxgbe: number of VIs limited to %d\n", t4_num_vis); } if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) { pcie_relaxed_ordering = 1; #if defined(__i386__) || defined(__amd64__) if (cpu_vendor_id == CPU_VENDOR_INTEL) pcie_relaxed_ordering = 0; #endif } } #ifdef DDB static void t4_dump_tcb(struct adapter *sc, int tid) { uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); save = t4_read_reg(sc, reg); base = sc->memwin[2].mw_base; /* Dump TCB for the tid */ tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); tcb_addr += tid * TCB_SIZE; if (is_t4(sc)) { pf = 0; win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ } else { pf = V_PFNUM(sc->pf); win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ } t4_write_reg(sc, reg, win_pos | pf); t4_read_reg(sc, reg); off = tcb_addr - win_pos; for (i = 0; i < 4; i++) { uint32_t buf[8]; for (j = 0; j < 8; j++, off += 4) buf[j] = htonl(t4_read_reg(sc, base + off)); db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); } t4_write_reg(sc, reg, save); t4_read_reg(sc, reg); } static void t4_dump_devlog(struct adapter *sc) { struct devlog_params *dparams = &sc->params.devlog; struct fw_devlog_e e; int i, first, j, m, nentries, rc; uint64_t ftstamp = UINT64_MAX; if (dparams->start == 0) { db_printf("devlog params not valid\n"); return; } nentries = dparams->size / sizeof(struct fw_devlog_e); m = fwmtype_to_hwmtype(dparams->memtype); /* Find the first entry. */ first = -1; for (i = 0; i < nentries && !db_pager_quit; i++) { rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), sizeof(e), (void *)&e); if (rc != 0) break; if (e.timestamp == 0) break; e.timestamp = be64toh(e.timestamp); if (e.timestamp < ftstamp) { ftstamp = e.timestamp; first = i; } } if (first == -1) return; i = first; do { rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), sizeof(e), (void *)&e); if (rc != 0) return; if (e.timestamp == 0) return; e.timestamp = be64toh(e.timestamp); e.seqno = be32toh(e.seqno); for (j = 0; j < 8; j++) e.params[j] = be32toh(e.params[j]); db_printf("%10d %15ju %8s %8s ", e.seqno, e.timestamp, (e.level < nitems(devlog_level_strings) ? devlog_level_strings[e.level] : "UNKNOWN"), (e.facility < nitems(devlog_facility_strings) ? 
devlog_facility_strings[e.facility] : "UNKNOWN")); db_printf(e.fmt, e.params[0], e.params[1], e.params[2], e.params[3], e.params[4], e.params[5], e.params[6], e.params[7]); if (++i == nentries) i = 0; } while (i != first && !db_pager_quit); } static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) { device_t dev; int t; bool valid; valid = false; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); valid = true; } db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 devlog \n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } t4_dump_devlog(device_get_softc(dev)); } DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) { device_t dev; int radix, tid, t; bool valid; valid = false; radix = db_radix; db_radix = 10; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); t = db_read_token(); if (t == tNUMBER) { tid = db_tok_number; valid = true; } } db_radix = radix; db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 tcb \n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } if (tid < 0) { db_printf("invalid tid\n"); return; } t4_dump_tcb(device_get_softc(dev), tid); } #endif /* * Borrowed from cesa_prep_aes_key(). * * NB: The crypto engine wants the words in the decryption key in reverse * order. */ void t4_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits) { uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; uint32_t *dkey; int i; rijndaelKeySetupEnc(ek, enc_key, kbits); dkey = dec_key; dkey += (kbits / 8) / 4; switch (kbits) { case 128: for (i = 0; i < 4; i++) *--dkey = htobe32(ek[4 * 10 + i]); break; case 192: for (i = 0; i < 2; i++) *--dkey = htobe32(ek[4 * 11 + 2 + i]); for (i = 0; i < 4; i++) *--dkey = htobe32(ek[4 * 12 + i]); break; case 256: for (i = 0; i < 4; i++) *--dkey = htobe32(ek[4 * 13 + i]); for (i = 0; i < 4; i++) *--dkey = htobe32(ek[4 * 14 + i]); break; } MPASS(dkey == dec_key); } static struct sx mlu; /* mod load unload */ SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); static int mod_event(module_t mod, int cmd, void *arg) { int rc = 0; static int loaded = 0; switch (cmd) { case MOD_LOAD: sx_xlock(&mlu); if (loaded++ == 0) { t4_sge_modload(); t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_filter_rpl, CPL_COOKIE_FILTER); t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl, CPL_COOKIE_FILTER); t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER); t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER); t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER); t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); t4_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); sx_init(&t4_list_lock, "T4/T5 adapters"); SLIST_INIT(&t4_list); callout_init(&fatal_callout, 1); #ifdef TCP_OFFLOAD sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); SLIST_INIT(&t4_uld_list); #endif #ifdef INET6 t4_clip_modload(); #endif t4_tracer_modload(); tweak_tunables(); } sx_xunlock(&mlu); break; case MOD_UNLOAD: sx_xlock(&mlu); if (--loaded == 0) { int tries; sx_slock(&t4_list_lock); if (!SLIST_EMPTY(&t4_list)) { rc = EBUSY; sx_sunlock(&t4_list_lock); goto done_unload; } #ifdef TCP_OFFLOAD sx_slock(&t4_uld_list_lock); if (!SLIST_EMPTY(&t4_uld_list)) 
{ rc = EBUSY; sx_sunlock(&t4_uld_list_lock); sx_sunlock(&t4_list_lock); goto done_unload; } #endif tries = 0; while (tries++ < 5 && t4_sge_extfree_refs() != 0) { uprintf("%ju clusters with custom free routine " "still in use.\n", t4_sge_extfree_refs()); pause("t4unload", 2 * hz); } #ifdef TCP_OFFLOAD sx_sunlock(&t4_uld_list_lock); #endif sx_sunlock(&t4_list_lock); if (t4_sge_extfree_refs() == 0) { t4_tracer_modunload(); #ifdef INET6 t4_clip_modunload(); #endif #ifdef TCP_OFFLOAD sx_destroy(&t4_uld_list_lock); #endif sx_destroy(&t4_list_lock); t4_sge_modunload(); loaded = 0; } else { rc = EBUSY; loaded++; /* undo earlier decrement */ } } done_unload: sx_xunlock(&mlu); break; } return (rc); } static devclass_t t4_devclass, t5_devclass, t6_devclass; static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); MODULE_VERSION(t4nex, 1); MODULE_DEPEND(t4nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t4nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); MODULE_VERSION(t5nex, 1); MODULE_DEPEND(t5nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t5nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); MODULE_VERSION(t6nex, 1); MODULE_DEPEND(t6nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t6nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); MODULE_VERSION(cxgbe, 1); DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); MODULE_VERSION(cxl, 1); DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); MODULE_VERSION(cc, 1); DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); MODULE_VERSION(vcxgbe, 1); DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); MODULE_VERSION(vcxl, 1); DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); MODULE_VERSION(vcc, 1); Index: stable/11/sys/dev/cxgbe/t4_sge.c =================================================================== --- stable/11/sys/dev/cxgbe/t4_sge.c (revision 346966) +++ stable/11/sys/dev/cxgbe/t4_sge.c (revision 346967) @@ -1,5327 +1,5324 @@ /*- * Copyright (c) 2011 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_NETMAP #include #include #include #include #include #endif #include "common/common.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "common/t4_msg.h" #include "t4_l2t.h" #include "t4_mp_ring.h" #ifdef T4_PKT_TIMESTAMP #define RX_COPY_THRESHOLD (MINCLSIZE - 8) #else #define RX_COPY_THRESHOLD MINCLSIZE #endif /* * Ethernet frames are DMA'd at this byte offset into the freelist buffer. * 0-7 are valid values. */ static int fl_pktshift = 2; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0, "payload DMA offset in rx buffer (bytes)"); /* * Pad ethernet payload up to this boundary. * -1: driver should figure out a good value. * 0: disable padding. * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. */ int fl_pad = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0, "payload pad boundary (bytes)"); /* * Status page length. * -1: driver should figure out a good value. * 64 or 128 are the only other valid values. */ static int spg_len = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0, "status page size (bytes)"); /* * Congestion drops. * -1: no congestion feedback (not recommended). * 0: backpressure the channel instead of dropping packets right away. * 1: no backpressure, drop packets for the congested queue immediately. */ static int cong_drop = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0, "Congestion control for RX queues (0 = backpressure, 1 = drop)"); /* * Deliver multiple frames in the same free list buffer if they fit. * -1: let the driver decide whether to enable buffer packing or not. * 0: disable buffer packing. * 1: enable buffer packing. */ static int buffer_packing = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing, 0, "Enable buffer packing"); /* * Start next frame in a packed buffer at this boundary. * -1: driver should figure out a good value. * T4: driver will ignore this and use the same value as fl_pad above. * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value. */ static int fl_pack = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0, "payload pack boundary (bytes)"); /* * Allow the driver to create mbuf(s) in a cluster allocated for rx. * 0: never; always allocate mbufs from the zone_mbuf UMA zone. * 1: ok to create mbuf(s) within a cluster if there is room. */ static int allow_mbufs_in_cluster = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, allow_mbufs_in_cluster, CTLFLAG_RDTUN, &allow_mbufs_in_cluster, 0, "Allow driver to create mbufs within a rx cluster"); /* * Largest rx cluster size that the driver is allowed to allocate.
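 * Valid settings are the standard cluster zone sizes: 2048 (MCLBYTES),
 * 4096 (MJUMPAGESIZE on most platforms), 9216 (MJUM9BYTES), and 16384
 * (MJUM16BYTES).  This is a read-only tunable (CTLFLAG_RDTUN), so it is
 * set from loader.conf(5); an illustrative entry that caps rx
 * allocations at page-sized clusters:
 *
 *	hw.cxgbe.largest_rx_cluster="4096"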
*/ static int largest_rx_cluster = MJUM16BYTES; SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN, &largest_rx_cluster, 0, "Largest rx cluster (bytes)"); /* * Size of cluster allocation that's most likely to succeed. The driver will * fall back to this size if it fails to allocate clusters larger than this. */ static int safest_rx_cluster = PAGE_SIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN, &safest_rx_cluster, 0, "Safe rx cluster (bytes)"); /* * The interrupt holdoff timers are multiplied by this value on T6+. * 1 and 3-17 (both inclusive) are legal values. */ static int tscale = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0, "Interrupt holdoff timer scale on T6+"); /* * Number of LRO entries in the lro_ctrl structure per rx queue. */ static int lro_entries = TCP_LRO_ENTRIES; SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0, "Number of LRO entries per RX queue"); /* * This enables presorting of frames before they're fed into tcp_lro_rx. */ static int lro_mbufs = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0, "Enable presorting of LRO frames"); struct txpkts { u_int wr_type; /* type 0 or type 1 */ u_int npkt; /* # of packets in this work request */ u_int plen; /* total payload (sum of all packets) */ u_int len16; /* # of 16B pieces used by this work request */ }; /* A packet's SGL. This + m_pkthdr has all info needed for tx */ struct sgl { struct sglist sg; struct sglist_seg seg[TX_SGL_SEGS]; }; static int service_iq(struct sge_iq *, int); static int service_iq_fl(struct sge_iq *, int); static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *); static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int); static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t, uint16_t, char *); static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, bus_addr_t *, void **); static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, void *); static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *, int, int); static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *); static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, struct sge_iq *); static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *, struct sysctl_oid *, struct sge_fl *); static int alloc_fwq(struct adapter *); static int free_fwq(struct adapter *); static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int, struct sysctl_oid *); static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int, struct sysctl_oid *); static int free_rxq(struct vi_info *, struct sge_rxq *); #ifdef TCP_OFFLOAD static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int, struct sysctl_oid *); static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *); #endif #ifdef DEV_NETMAP static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int, struct sysctl_oid *); static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *); static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int, struct sysctl_oid *); static int free_nm_txq(struct vi_info *, struct sge_nm_txq *); #endif static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); static int eth_eq_alloc(struct adapter 
*, struct vi_info *, struct sge_eq *); #ifdef TCP_OFFLOAD static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); #endif static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *); static int free_eq(struct adapter *, struct sge_eq *); static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *, struct sysctl_oid *); static int free_wrq(struct adapter *, struct sge_wrq *); static int alloc_txq(struct vi_info *, struct sge_txq *, int, struct sysctl_oid *); static int free_txq(struct vi_info *, struct sge_txq *); static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); static inline void ring_fl_db(struct adapter *, struct sge_fl *); static int refill_fl(struct adapter *, struct sge_fl *, int); static void refill_sfl(void *); static int alloc_fl_sdesc(struct sge_fl *); static void free_fl_sdesc(struct adapter *, struct sge_fl *); static void find_best_refill_source(struct adapter *, struct sge_fl *, int); static void find_safe_refill_source(struct adapter *, struct sge_fl *); static void add_fl_to_sfl(struct adapter *, struct sge_fl *); static inline void get_pkt_gl(struct mbuf *, struct sglist *); static inline u_int txpkt_len16(u_int, u_int); static inline u_int txpkt_vm_len16(u_int, u_int); static inline u_int txpkts0_len16(u_int); static inline u_int txpkts1_len16(void); static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *, struct mbuf *, u_int); static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *, struct fw_eth_tx_pkt_vm_wr *, struct mbuf *, u_int); static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int); static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int); static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *, struct mbuf *, const struct txpkts *, u_int); static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int); static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int); static inline uint16_t read_hw_cidx(struct sge_eq *); static inline u_int reclaimable_tx_desc(struct sge_eq *); static inline u_int total_available_tx_desc(struct sge_eq *); static u_int reclaim_tx_descs(struct sge_txq *, u_int); static void tx_reclaim(void *, int); static __be64 get_flit(struct sglist_seg *, int, int); static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, struct mbuf *); static int handle_fw_msg(struct sge_iq *, const struct rss_header *, struct mbuf *); static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *); static void wrq_tx_drain(void *, int); static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *); static int sysctl_uint16(SYSCTL_HANDLER_ARGS); static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); static counter_u64_t extfree_refs; static counter_u64_t extfree_rels; an_handler_t t4_an_handler; fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES]; cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS]; cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES]; cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES]; cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES]; cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES]; cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES]; void t4_register_an_handler(an_handler_t h) { uintptr_t *loc; MPASS(h == NULL || t4_an_handler == NULL); loc = (uintptr_t *)&t4_an_handler; atomic_store_rel_ptr(loc, (uintptr_t)h); } void t4_register_fw_msg_handler(int type, fw_msg_handler_t h) { uintptr_t *loc; 
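	/*
	 * The handler pointer is published with a release store
	 * (atomic_store_rel_ptr below) so that anyone who loads the new
	 * pointer also observes everything that was initialized before it
	 * was installed; see atomic(9).
	 */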
MPASS(type < nitems(t4_fw_msg_handler)); MPASS(h == NULL || t4_fw_msg_handler[type] == NULL); /* * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL * handler dispatch table. Reject any attempt to install a handler for * this subtype. */ MPASS(type != FW_TYPE_RSSCPL); MPASS(type != FW6_TYPE_RSSCPL); loc = (uintptr_t *)&t4_fw_msg_handler[type]; atomic_store_rel_ptr(loc, (uintptr_t)h); } void t4_register_cpl_handler(int opcode, cpl_handler_t h) { uintptr_t *loc; MPASS(opcode < nitems(t4_cpl_handler)); MPASS(h == NULL || t4_cpl_handler[opcode] == NULL); loc = (uintptr_t *)&t4_cpl_handler[opcode]; atomic_store_rel_ptr(loc, (uintptr_t)h); } static int set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); u_int tid; int cookie; MPASS(m == NULL); tid = GET_TID(cpl); if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) { /* * The return code for filter-write is put in the CPL cookie so * we have to rely on the hardware tid (is_ftid) to determine * that this is a response to a filter. */ cookie = CPL_COOKIE_FILTER; } else { cookie = G_COOKIE(cpl->cookie); } MPASS(cookie > CPL_COOKIE_RESERVED); MPASS(cookie < nitems(set_tcb_rpl_handlers)); return (set_tcb_rpl_handlers[cookie](iq, rss, m)); } static int l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1); unsigned int cookie; MPASS(m == NULL); cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER; return (l2t_write_rpl_handlers[cookie](iq, rss, m)); } static int act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1); u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status))); MPASS(m == NULL); MPASS(cookie != CPL_COOKIE_RESERVED); return (act_open_rpl_handlers[cookie](iq, rss, m)); } static int abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; u_int cookie; MPASS(m == NULL); if (is_hashfilter(sc)) cookie = CPL_COOKIE_HASHFILTER; else cookie = CPL_COOKIE_TOM; return (abort_rpl_rss_handlers[cookie](iq, rss, m)); } static int fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); u_int cookie; MPASS(m == NULL); if (is_etid(sc, tid)) cookie = CPL_COOKIE_ETHOFLD; else cookie = CPL_COOKIE_TOM; return (fw4_ack_handlers[cookie](iq, rss, m)); } static void t4_init_shared_cpl_handlers(void) { t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler); t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler); t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler); t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler); t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler); } void t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie) { uintptr_t *loc; MPASS(opcode < nitems(t4_cpl_handler)); MPASS(cookie > CPL_COOKIE_RESERVED); MPASS(cookie < NUM_CPL_COOKIES); MPASS(t4_cpl_handler[opcode] != NULL); switch (opcode) { case CPL_SET_TCB_RPL: loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie]; break; case CPL_L2T_WRITE_RPL: loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie]; break; case CPL_ACT_OPEN_RPL: loc = (uintptr_t 
*)&act_open_rpl_handlers[cookie]; break; case CPL_ABORT_RPL_RSS: loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie]; break; case CPL_FW4_ACK: loc = (uintptr_t *)&fw4_ack_handlers[cookie]; break; default: MPASS(0); return; } MPASS(h == NULL || *loc == (uintptr_t)NULL); atomic_store_rel_ptr(loc, (uintptr_t)h); } /* * Called on MOD_LOAD. Validates and calculates the SGE tunables. */ void t4_sge_modload(void) { if (fl_pktshift < 0 || fl_pktshift > 7) { printf("Invalid hw.cxgbe.fl_pktshift value (%d)," " using 2 instead.\n", fl_pktshift); fl_pktshift = 2; } if (spg_len != 64 && spg_len != 128) { int len; #if defined(__i386__) || defined(__amd64__) len = cpu_clflush_line_size > 64 ? 128 : 64; #else len = 64; #endif if (spg_len != -1) { printf("Invalid hw.cxgbe.spg_len value (%d)," " using %d instead.\n", spg_len, len); } spg_len = len; } if (cong_drop < -1 || cong_drop > 1) { printf("Invalid hw.cxgbe.cong_drop value (%d)," " using 0 instead.\n", cong_drop); cong_drop = 0; } if (tscale != 1 && (tscale < 3 || tscale > 17)) { printf("Invalid hw.cxgbe.tscale value (%d)," " using 1 instead.\n", tscale); tscale = 1; } extfree_refs = counter_u64_alloc(M_WAITOK); extfree_rels = counter_u64_alloc(M_WAITOK); counter_u64_zero(extfree_refs); counter_u64_zero(extfree_rels); t4_init_shared_cpl_handlers(); t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg); t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg); t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update); t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx); t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl); } void t4_sge_modunload(void) { counter_u64_free(extfree_refs); counter_u64_free(extfree_rels); } uint64_t t4_sge_extfree_refs(void) { uint64_t refs, rels; rels = counter_u64_fetch(extfree_rels); refs = counter_u64_fetch(extfree_refs); return (refs - rels); } static inline void setup_pad_and_pack_boundaries(struct adapter *sc) { uint32_t v, m; int pad, pack, pad_shift; pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT : X_INGPADBOUNDARY_SHIFT; pad = fl_pad; if (fl_pad < (1 << pad_shift) || fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) || !powerof2(fl_pad)) { /* * If there is any chance that we might use buffer packing and * the chip is a T4, then pick 64 as the pad/pack boundary. Set * it to the minimum allowed in all other cases. */ pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift; /* * For fl_pad = 0 we'll still write a reasonable value to the * register but all the freelists will opt out of padding. * We'll complain here only if the user tried to set it to a * value greater than 0 that was invalid. */ if (fl_pad > 0) { device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" " (%d), using %d instead.\n", fl_pad, pad); } } m = V_INGPADBOUNDARY(M_INGPADBOUNDARY); v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift); t4_set_reg_field(sc, A_SGE_CONTROL, m, v); if (is_t4(sc)) { if (fl_pack != -1 && fl_pack != pad) { /* Complain but carry on. 
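 * The T4 SGE has no separate pack boundary register (SGE_CONTROL2 first
 * appeared on T5), so the pad boundary governs packing as well: e.g.
 * fl_pad = 64 with a requested fl_pack = 128 results in 64 being used
 * for both.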
*/ device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," " using %d instead.\n", fl_pack, pad); } return; } pack = fl_pack; if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 || !powerof2(fl_pack)) { pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); MPASS(powerof2(pack)); if (pack < 16) pack = 16; if (pack == 32) pack = 64; if (pack > 4096) pack = 4096; if (fl_pack != -1) { device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" " (%d), using %d instead.\n", fl_pack, pack); } } m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); if (pack == 16) v = V_INGPACKBOUNDARY(0); else v = V_INGPACKBOUNDARY(ilog2(pack) - 5); MPASS(!is_t4(sc)); /* T4 doesn't have SGE_CONTROL2 */ t4_set_reg_field(sc, A_SGE_CONTROL2, m, v); } /* * adap->params.vpd.cclk must be set up before this is called. */ void t4_tweak_chip_settings(struct adapter *sc) { int i; uint32_t v, m; int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); static int sge_flbuf_sizes[] = { MCLBYTES, #if MJUMPAGESIZE != MCLBYTES MJUMPAGESIZE, MJUMPAGESIZE - CL_METADATA_SIZE, MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE, #endif MJUM9BYTES, MJUM16BYTES, MCLBYTES - MSIZE - CL_METADATA_SIZE, MJUM9BYTES - CL_METADATA_SIZE, MJUM16BYTES - CL_METADATA_SIZE, }; KASSERT(sc->flags & MASTER_PF, ("%s: trying to change chip settings when not master.", __func__)); m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | V_EGRSTATUSPAGESIZE(spg_len == 128); t4_set_reg_field(sc, A_SGE_CONTROL, m, v); setup_pad_and_pack_boundaries(sc); v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v); KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES, ("%s: hw buffer size table too big", __func__)); t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096); t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536); for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) { t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE15 - (4 * i), sge_flbuf_sizes[i]); } v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) | V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]); t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v); KASSERT(intr_timer[0] <= timer_max, ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0], timer_max)); for (i = 1; i < nitems(intr_timer); i++) { KASSERT(intr_timer[i] >= intr_timer[i - 1], ("%s: timers not listed in increasing order (%d)", __func__, i)); while (intr_timer[i] > timer_max) { if (i == nitems(intr_timer) - 1) { intr_timer[i] = timer_max; break; } intr_timer[i] += intr_timer[i - 1]; intr_timer[i] /= 2; } } v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) | V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])); t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v); v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) | V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])); t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v); v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) | V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])); t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v); if (chip_id(sc) >= CHELSIO_T6) { m = 
V_TSCALE(M_TSCALE); if (tscale == 1) v = 0; else v = V_TSCALE(tscale - 2); t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v); if (sc->debug_flags & DF_DISABLE_TCB_CACHE) { m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN | V_WRTHRTHRESH(M_WRTHRTHRESH); t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1); v &= ~m; v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN | V_WRTHRTHRESH(16); t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1); } } /* 4K, 16K, 64K, 256K DDP "page sizes" */ v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v); m = v = F_TDDPTAGTCB; t4_set_reg_field(sc, A_ULP_RX_CTL, m, v); m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; t4_set_reg_field(sc, A_TP_PARA_REG5, m, v); } /* * SGE wants the buffer to be at least 64B and then a multiple of 16. If * padding is in use, the buffer's start and end need to be aligned to the pad * boundary as well. We'll just make sure that the size is a multiple of the * boundary here, it is up to the buffer allocation code to make sure the start * of the buffer is aligned as well. */ static inline int hwsz_ok(struct adapter *sc, int hwsz) { int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1; return (hwsz >= 64 && (hwsz & mask) == 0); } /* * XXX: driver really should be able to deal with unexpected settings. */ int t4_read_chip_settings(struct adapter *sc) { struct sge *s = &sc->sge; struct sge_params *sp = &sc->params.sge; int i, j, n, rc = 0; uint32_t m, v, r; uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); static int sw_buf_sizes[] = { /* Sorted by size */ MCLBYTES, #if MJUMPAGESIZE != MCLBYTES MJUMPAGESIZE, #endif MJUM9BYTES, MJUM16BYTES }; struct sw_zone_info *swz, *safe_swz; struct hw_buf_info *hwb; m = F_RXPKTCPLMODE; v = F_RXPKTCPLMODE; r = sc->params.sge.sge_control; if ((r & m) != v) { device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); rc = EINVAL; } /* * If this changes then every single use of PAGE_SHIFT in the driver * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift. */ if (sp->page_shift != PAGE_SHIFT) { device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); rc = EINVAL; } /* Filter out unusable hw buffer sizes entirely (mark with -2). */ hwb = &s->hw_buf_info[0]; for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) { r = sc->params.sge.sge_fl_buffer_size[i]; hwb->size = r; hwb->zidx = hwsz_ok(sc, r) ? -1 : -2; hwb->next = -1; } /* * Create a sorted list in decreasing order of hw buffer sizes (and so * increasing order of spare area) for each software zone. * * If padding is enabled then the start and end of the buffer must align * to the pad boundary; if packing is enabled then they must align with * the pack boundary as well. Allocations from the cluster zones are * aligned to min(size, 4K), so the buffer starts at that alignment and * ends at hwb->size alignment. If mbuf inlining is allowed the * starting alignment will be reduced to MSIZE and the driver will * exercise appropriate caution when deciding on the best buffer layout * to use. 
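 * A worked example with illustrative numbers: for a 4096B cluster zone
 * with usable hw buffer sizes 4096 and 4032, the zone's list is
 * 4096 -> 4032; choosing the 4032 entry leaves 64B of spare area at the
 * end of the cluster, enough for the metadata when CL_METADATA_SIZE is
 * one 64B cache line, which is what buffer packing requires.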
*/ n = 0; /* no usable buffer size to begin with */ swz = &s->sw_zone_info[0]; safe_swz = NULL; for (i = 0; i < SW_ZONE_SIZES; i++, swz++) { int8_t head = -1, tail = -1; swz->size = sw_buf_sizes[i]; swz->zone = m_getzone(swz->size); swz->type = m_gettype(swz->size); if (swz->size < PAGE_SIZE) { MPASS(powerof2(swz->size)); if (fl_pad && (swz->size % sp->pad_boundary != 0)) continue; } if (swz->size == safest_rx_cluster) safe_swz = swz; hwb = &s->hw_buf_info[0]; for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) { if (hwb->zidx != -1 || hwb->size > swz->size) continue; #ifdef INVARIANTS if (fl_pad) MPASS(hwb->size % sp->pad_boundary == 0); #endif hwb->zidx = i; if (head == -1) head = tail = j; else if (hwb->size < s->hw_buf_info[tail].size) { s->hw_buf_info[tail].next = j; tail = j; } else { int8_t *cur; struct hw_buf_info *t; for (cur = &head; *cur != -1; cur = &t->next) { t = &s->hw_buf_info[*cur]; if (hwb->size == t->size) { hwb->zidx = -2; break; } if (hwb->size > t->size) { hwb->next = *cur; *cur = j; break; } } } } swz->head_hwidx = head; swz->tail_hwidx = tail; if (tail != -1) { n++; if (swz->size - s->hw_buf_info[tail].size >= CL_METADATA_SIZE) sc->flags |= BUF_PACKING_OK; } } if (n == 0) { device_printf(sc->dev, "no usable SGE FL buffer size.\n"); rc = EINVAL; } s->safe_hwidx1 = -1; s->safe_hwidx2 = -1; if (safe_swz != NULL) { s->safe_hwidx1 = safe_swz->head_hwidx; for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) { int spare; hwb = &s->hw_buf_info[i]; #ifdef INVARIANTS if (fl_pad) MPASS(hwb->size % sp->pad_boundary == 0); #endif spare = safe_swz->size - hwb->size; if (spare >= CL_METADATA_SIZE) { s->safe_hwidx2 = i; break; } } } if (sc->flags & IS_VF) return (0); v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); if (r != v) { device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); rc = EINVAL; } m = v = F_TDDPTAGTCB; r = t4_read_reg(sc, A_ULP_RX_CTL); if ((r & m) != v) { device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); rc = EINVAL; } m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; r = t4_read_reg(sc, A_TP_PARA_REG5); if ((r & m) != v) { device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); rc = EINVAL; } t4_init_tp_params(sc, 1); t4_read_mtu_tbl(sc, sc->params.mtus, NULL); t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); return (rc); } int t4_create_dma_tag(struct adapter *sc) { int rc; rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->dmat); if (rc != 0) { device_printf(sc->dev, "failed to create main DMA tag: %d\n", rc); } return (rc); } void t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *children) { struct sge_params *sp = &sc->params.sge; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes", CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A", "freelist buffer sizes"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD, NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD, NULL, sp->pad_boundary, "payload pad boundary (bytes)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD, NULL, sp->spg_len, "status page size (bytes)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD, NULL, 
cong_drop, "congestion drop setting"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD, NULL, sp->pack_boundary, "payload pack boundary (bytes)"); } int t4_destroy_dma_tag(struct adapter *sc) { if (sc->dmat) bus_dma_tag_destroy(sc->dmat); return (0); } /* * Allocate and initialize the firmware event queue, control queues, and special * purpose rx queues owned by the adapter. * * Returns errno on failure. Resources allocated up to that point may still be * allocated. Caller is responsible for cleanup in case this function fails. */ int t4_setup_adapter_queues(struct adapter *sc) { struct sysctl_oid *oid; struct sysctl_oid_list *children; int rc, i; ADAPTER_LOCK_ASSERT_NOTOWNED(sc); sysctl_ctx_init(&sc->ctx); sc->flags |= ADAP_SYSCTL_CTX; /* * Firmware event queue */ rc = alloc_fwq(sc); if (rc != 0) return (rc); /* * That's all for the VF driver. */ if (sc->flags & IS_VF) return (rc); oid = device_get_sysctl_tree(sc->dev); children = SYSCTL_CHILDREN(oid); /* * XXX: General purpose rx queues, one per port. */ /* * Control queues, one per port. */ oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD, NULL, "control queues"); for_each_port(sc, i) { struct sge_wrq *ctrlq = &sc->sge.ctrlq[i]; rc = alloc_ctrlq(sc, ctrlq, i, oid); if (rc != 0) return (rc); } return (rc); } /* * Idempotent */ int t4_teardown_adapter_queues(struct adapter *sc) { int i; ADAPTER_LOCK_ASSERT_NOTOWNED(sc); /* Do this before freeing the queue */ if (sc->flags & ADAP_SYSCTL_CTX) { sysctl_ctx_free(&sc->ctx); sc->flags &= ~ADAP_SYSCTL_CTX; } if (!(sc->flags & IS_VF)) { for_each_port(sc, i) free_wrq(sc, &sc->sge.ctrlq[i]); } free_fwq(sc); return (0); } /* Maximum payload that can be delivered with a single iq descriptor */ static inline int mtu_to_max_payload(struct adapter *sc, int mtu, const int toe) { int payload; #ifdef TCP_OFFLOAD if (toe) { int rxcs = G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); /* Note that COP can set rx_coalesce on/off per connection. */ payload = max(mtu, rxcs); } else { #endif /* large enough even when hw VLAN extraction is disabled */ payload = sc->params.sge.fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + mtu; #ifdef TCP_OFFLOAD } #endif return (payload); } int t4_setup_vi_queues(struct vi_info *vi) { int rc = 0, i, intr_idx, iqidx; struct sge_rxq *rxq; struct sge_txq *txq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; struct sge_wrq *ofld_txq; #endif #ifdef DEV_NETMAP int saved_idx; struct sge_nm_rxq *nm_rxq; struct sge_nm_txq *nm_txq; #endif char name[16]; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifnet *ifp = vi->ifp; struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev); struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); int maxp, mtu = ifp->if_mtu; /* Interrupt vector to start from (when using multiple vectors) */ intr_idx = vi->first_intr; #ifdef DEV_NETMAP saved_idx = intr_idx; if (ifp->if_capabilities & IFCAP_NETMAP) { /* netmap is supported with direct interrupts only. */ MPASS(!forwarding_intr_to_fwq(sc)); /* * We don't have buffers to back the netmap rx queues * right now so we create the queues in a way that * doesn't set off any congestion signal in the chip. 
*/ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq", CTLFLAG_RD, NULL, "rx queues"); for_each_nm_rxq(vi, i, nm_rxq) { rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid); if (rc != 0) goto done; intr_idx++; } oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq", CTLFLAG_RD, NULL, "tx queues"); for_each_nm_txq(vi, i, nm_txq) { iqidx = vi->first_nm_rxq + (i % vi->nnmrxq); rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid); if (rc != 0) goto done; } } /* Normal rx queues and netmap rx queues share the same interrupts. */ intr_idx = saved_idx; #endif /* * Allocate rx queues first because a default iqid is required when * creating a tx queue. */ maxp = mtu_to_max_payload(sc, mtu, 0); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD, NULL, "rx queues"); for_each_rxq(vi, i, rxq) { init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq); snprintf(name, sizeof(name), "%s rxq%d-fl", device_get_nameunit(vi->dev), i); init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); rc = alloc_rxq(vi, rxq, forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid); if (rc != 0) goto done; intr_idx++; } #ifdef DEV_NETMAP if (ifp->if_capabilities & IFCAP_NETMAP) intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); #endif #ifdef TCP_OFFLOAD maxp = mtu_to_max_payload(sc, mtu, 1); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq", CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections"); for_each_ofld_rxq(vi, i, ofld_rxq) { init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx, vi->qsize_rxq); snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", device_get_nameunit(vi->dev), i); init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); rc = alloc_ofld_rxq(vi, ofld_rxq, forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid); if (rc != 0) goto done; intr_idx++; } #endif /* * Now the tx queues. 
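 * Each tx queue is bound to an rx queue's ingress context (iqidx below)
 * so that egress updates for the eq arrive on a known iq; when ntxq >
 * nrxq the binding simply wraps around (i % vi->nrxq).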
*/ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD, NULL, "tx queues"); for_each_txq(vi, i, txq) { iqidx = vi->first_rxq + (i % vi->nrxq); snprintf(name, sizeof(name), "%s txq%d", device_get_nameunit(vi->dev), i); init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, sc->sge.rxq[iqidx].iq.cntxt_id, name); rc = alloc_txq(vi, txq, i, oid); if (rc != 0) goto done; } #ifdef TCP_OFFLOAD oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq", CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections"); for_each_ofld_txq(vi, i, ofld_txq) { struct sysctl_oid *oid2; iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq); snprintf(name, sizeof(name), "%s ofld_txq%d", device_get_nameunit(vi->dev), i); init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan, sc->sge.ofld_rxq[iqidx].iq.cntxt_id, name); snprintf(name, sizeof(name), "%d", i); oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, "offload tx queue"); rc = alloc_wrq(sc, vi, ofld_txq, oid2); if (rc != 0) goto done; } #endif done: if (rc) t4_teardown_vi_queues(vi); return (rc); } /* * Idempotent */ int t4_teardown_vi_queues(struct vi_info *vi) { int i; struct sge_rxq *rxq; struct sge_txq *txq; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct sge_wrq *ofld_txq; #endif #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #ifdef DEV_NETMAP struct sge_nm_rxq *nm_rxq; struct sge_nm_txq *nm_txq; #endif /* Do this before freeing the queues */ if (vi->flags & VI_SYSCTL_CTX) { sysctl_ctx_free(&vi->ctx); vi->flags &= ~VI_SYSCTL_CTX; } #ifdef DEV_NETMAP if (vi->ifp->if_capabilities & IFCAP_NETMAP) { for_each_nm_txq(vi, i, nm_txq) { free_nm_txq(vi, nm_txq); } for_each_nm_rxq(vi, i, nm_rxq) { free_nm_rxq(vi, nm_rxq); } } #endif /* * Take down all the tx queues first, as they reference the rx queues * (for egress updates, etc.). */ for_each_txq(vi, i, txq) { free_txq(vi, txq); } #ifdef TCP_OFFLOAD for_each_ofld_txq(vi, i, ofld_txq) { free_wrq(sc, ofld_txq); } #endif /* * Then take down the rx queues. */ for_each_rxq(vi, i, rxq) { free_rxq(vi, rxq); } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { free_ofld_rxq(vi, ofld_rxq); } #endif return (0); } /* * Interrupt handler when the driver is using only 1 interrupt. This is a very * unusual scenario. * * a) Deals with errors, if any. * b) Services firmware event queue, which is taking interrupts for all other * queues. */ void t4_intr_all(void *arg) { struct adapter *sc = arg; struct sge_iq *fwq = &sc->sge.fwq; MPASS(sc->intr_count == 1); if (sc->intr_type == INTR_INTX) t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); t4_intr_err(arg); t4_intr_evt(fwq); } /* * Interrupt handler for errors (installed directly when multiple interrupts are * being used, or called by t4_intr_all). */ void t4_intr_err(void *arg) { struct adapter *sc = arg; uint32_t v; const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; if (sc->flags & ADAP_ERR) return; v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE)); if (v & F_PFSW) { sc->swintr++; t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v); } t4_slow_intr_handler(sc, verbose); } /* * Interrupt handler for iq-only queues. The firmware event queue is the only * such queue right now. */ void t4_intr_evt(void *arg) { struct sge_iq *iq = arg; if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { service_iq(iq, 0); (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); } } /* * Interrupt handler for iq+fl queues. 
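 * The IQS_IDLE -> IQS_BUSY cmpset is what keeps servicing of a queue
 * single threaded.  An annotated sketch of the pattern used by t4_intr
 * below (illustrative only, not compiled):
 */
#if 0
	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		/* This thread won the race and now owns the queue. */
		service_iq_fl(iq, 0);	/* budget 0: run until caught up */
		/* Release ownership; the next interrupt may claim it. */
		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
	/* A thread that loses the cmpset returns without touching iq. */
#endif
/*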
*/ void t4_intr(void *arg) { struct sge_iq *iq = arg; if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { service_iq_fl(iq, 0); (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); } } #ifdef DEV_NETMAP /* * Interrupt handler for netmap rx queues. */ void t4_nm_intr(void *arg) { struct sge_nm_rxq *nm_rxq = arg; if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) { service_nm_rxq(nm_rxq); (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON); } } /* * Interrupt handler for vectors shared between NIC and netmap rx queues. */ void t4_vi_intr(void *arg) { struct irq *irq = arg; MPASS(irq->nm_rxq != NULL); t4_nm_intr(irq->nm_rxq); MPASS(irq->rxq != NULL); t4_intr(irq->rxq); } #endif /* * Deals with interrupts on an iq-only (no freelist) queue. */ static int service_iq(struct sge_iq *iq, int budget) { struct sge_iq *q; struct adapter *sc = iq->adapter; struct iq_desc *d = &iq->desc[iq->cidx]; int ndescs = 0, limit; int rsp_type; uint32_t lq; STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); KASSERT((iq->flags & IQ_HAS_FL) == 0, ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq, iq->flags)); MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); MPASS((iq->flags & IQ_LRO_ENABLED) == 0); limit = budget ? budget : iq->qsize / 16; /* * We always come back and check the descriptor ring for new indirect * interrupts and other responses after running a single handler. */ for (;;) { while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { rmb(); rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); lq = be32toh(d->rsp.pldbuflen_qid); switch (rsp_type) { case X_RSPD_TYPE_FLBUF: panic("%s: data for an iq (%p) with no freelist", __func__, iq); /* NOTREACHED */ case X_RSPD_TYPE_CPL: KASSERT(d->rss.opcode < NUM_CPL_CMDS, ("%s: bad opcode %02x.", __func__, d->rss.opcode)); t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL); break; case X_RSPD_TYPE_INTR: /* * There are 1K interrupt-capable queues (qids 0 * through 1023). A response type indicating a * forwarded interrupt with a qid >= 1K is an * iWARP async notification. */ if (__predict_true(lq >= 1024)) { t4_an_handler(iq, &d->rsp); break; } q = sc->sge.iqmap[lq - sc->sge.iq_start - sc->sge.iq_base]; if (atomic_cmpset_int(&q->state, IQS_IDLE, IQS_BUSY)) { if (service_iq_fl(q, q->qsize / 16) == 0) { (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); } else { STAILQ_INSERT_TAIL(&iql, q, link); } } break; default: KASSERT(0, ("%s: illegal response type %d on iq %p", __func__, rsp_type, iq)); log(LOG_ERR, "%s: illegal response type %d on iq %p", device_get_nameunit(sc->dev), rsp_type, iq); break; } d++; if (__predict_false(++iq->cidx == iq->sidx)) { iq->cidx = 0; iq->gen ^= F_RSPD_GEN; d = &iq->desc[0]; } if (__predict_false(++ndescs == limit)) { t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | V_INGRESSQID(iq->cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); ndescs = 0; if (budget) { return (EINPROGRESS); } } } if (STAILQ_EMPTY(&iql)) break; /* * Process the head only, and send it to the back of the list if * it's still not done. 
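 * The budget used here (q->qsize / 8) is larger than the q->qsize / 16
 * used when the queue was first dispatched, so a deferred queue gets a
 * bigger slice of work before it is sent to the back again.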
*/ q = STAILQ_FIRST(&iql); STAILQ_REMOVE_HEAD(&iql, link); if (service_iq_fl(q, q->qsize / 8) == 0) (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); else STAILQ_INSERT_TAIL(&iql, q, link); } t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); return (0); } static inline int sort_before_lro(struct lro_ctrl *lro) { return (lro->lro_mbuf_max != 0); } /* * Deals with interrupts on an iq+fl queue. */ static int service_iq_fl(struct sge_iq *iq, int budget) { struct sge_rxq *rxq = iq_to_rxq(iq); struct sge_fl *fl; struct adapter *sc = iq->adapter; struct iq_desc *d = &iq->desc[iq->cidx]; int ndescs = 0, limit; int rsp_type, refill, starved; uint32_t lq; uint16_t fl_hw_cidx; struct mbuf *m0; #if defined(INET) || defined(INET6) const struct timeval lro_timeout = {0, sc->lro_timeout}; struct lro_ctrl *lro = &rxq->lro; #endif KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); MPASS(iq->flags & IQ_HAS_FL); limit = budget ? budget : iq->qsize / 16; fl = &rxq->fl; fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ #if defined(INET) || defined(INET6) if (iq->flags & IQ_ADJ_CREDIT) { MPASS(sort_before_lro(lro)); iq->flags &= ~IQ_ADJ_CREDIT; if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { tcp_lro_flush_all(lro); t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); return (0); } ndescs = 1; } #else MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); #endif while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { rmb(); refill = 0; m0 = NULL; rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); lq = be32toh(d->rsp.pldbuflen_qid); switch (rsp_type) { case X_RSPD_TYPE_FLBUF: m0 = get_fl_payload(sc, fl, lq); if (__predict_false(m0 == NULL)) goto out; refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2; #ifdef T4_PKT_TIMESTAMP /* * 60 bit timestamp for the payload is * *(uint64_t *)m0->m_pktdat. Note that it is * in the leading free-space in the mbuf. The * kernel can clobber it during a pullup, * m_copymdata, etc. You need to make sure that * the mbuf reaches you unmolested if you care * about the timestamp. */ *(uint64_t *)m0->m_pktdat = be64toh(ctrl->u.last_flit) & 0xfffffffffffffff; #endif /* fall through */ case X_RSPD_TYPE_CPL: KASSERT(d->rss.opcode < NUM_CPL_CMDS, ("%s: bad opcode %02x.", __func__, d->rss.opcode)); t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); break; case X_RSPD_TYPE_INTR: /* * There are 1K interrupt-capable queues (qids 0 * through 1023). A response type indicating a * forwarded interrupt with a qid >= 1K is an * iWARP async notification. That is the only * acceptable indirect interrupt on this queue. 
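 * Forwarded interrupts for other NIC queues are only expected on the
 * freelist-less firmware event queue (serviced by service_iq), hence
 * the panic below for a qid under 1024.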
*/ if (__predict_false(lq < 1024)) { panic("%s: indirect interrupt on iq_fl %p " "with qid %u", __func__, iq, lq); } t4_an_handler(iq, &d->rsp); break; default: KASSERT(0, ("%s: illegal response type %d on iq %p", __func__, rsp_type, iq)); log(LOG_ERR, "%s: illegal response type %d on iq %p", device_get_nameunit(sc->dev), rsp_type, iq); break; } d++; if (__predict_false(++iq->cidx == iq->sidx)) { iq->cidx = 0; iq->gen ^= F_RSPD_GEN; d = &iq->desc[0]; } if (__predict_false(++ndescs == limit)) { t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | V_INGRESSQID(iq->cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); ndescs = 0; #if defined(INET) || defined(INET6) if (iq->flags & IQ_LRO_ENABLED && !sort_before_lro(lro) && sc->lro_timeout != 0) { tcp_lro_flush_inactive(lro, &lro_timeout); } #endif if (budget) { FL_LOCK(fl); refill_fl(sc, fl, 32); FL_UNLOCK(fl); return (EINPROGRESS); } } if (refill) { FL_LOCK(fl); refill_fl(sc, fl, 32); FL_UNLOCK(fl); fl_hw_cidx = fl->hw_cidx; } } out: #if defined(INET) || defined(INET6) if (iq->flags & IQ_LRO_ENABLED) { if (ndescs > 0 && lro->lro_mbuf_count > 8) { MPASS(sort_before_lro(lro)); /* hold back one credit and don't flush LRO state */ iq->flags |= IQ_ADJ_CREDIT; ndescs--; } else { tcp_lro_flush_all(lro); } } #endif t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); FL_LOCK(fl); starved = refill_fl(sc, fl, 64); FL_UNLOCK(fl); if (__predict_false(starved != 0)) add_fl_to_sfl(sc, fl); return (0); } static inline int cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll) { int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0; if (rc) MPASS(cll->region3 >= CL_METADATA_SIZE); return (rc); } static inline struct cluster_metadata * cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll, caddr_t cl) { if (cl_has_metadata(fl, cll)) { struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; return ((struct cluster_metadata *)(cl + swz->size) - 1); } return (NULL); } static void rxb_free(struct mbuf *m, void *arg1, void *arg2) { uma_zone_t zone = arg1; caddr_t cl = arg2; uma_zfree(zone, cl); counter_u64_add(extfree_rels, 1); } /* * The mbuf returned by this function could be allocated from zone_mbuf or * constructed in spare room in the cluster. * * The mbuf carries the payload in one of these ways * a) frame inside the mbuf (mbuf from zone_mbuf) * b) m_cljset (for clusters without metadata) zone_mbuf * c) m_extaddref (cluster with metadata) inline mbuf * d) m_extaddref (cluster with metadata) zone_mbuf */ static struct mbuf * get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, int remaining) { struct mbuf *m; struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; struct cluster_layout *cll = &sd->cll; struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx]; struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl); int len, blen; caddr_t payload; blen = hwb->size - fl->rx_offset; /* max possible in this buf */ len = min(remaining, blen); payload = sd->cl + cll->region1 + fl->rx_offset; if (fl->flags & FL_BUF_PACKING) { const u_int l = fr_offset + len; const u_int pad = roundup2(l, fl->buf_boundary) - l; if (fl->rx_offset + len + pad < hwb->size) blen = len + pad; MPASS(fl->rx_offset + blen <= hwb->size); } else { MPASS(fl->rx_offset == 0); /* not packing */ } if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { /* * Copy payload into a freshly allocated mbuf. 
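 * This is the small-frame path (len < RX_COPY_THRESHOLD): the payload
 * is copied out and the cluster stays in place as a recycle candidate,
 * so a short frame never pins a large rx buffer.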
*/ m = fr_offset == 0 ? m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); if (m == NULL) return (NULL); fl->mbuf_allocated++; #ifdef T4_PKT_TIMESTAMP /* Leave room for a timestamp */ m->m_data += 8; #endif /* copy data to mbuf */ bcopy(payload, mtod(m, caddr_t), len); } else if (sd->nmbuf * MSIZE < cll->region1) { /* * There's spare room in the cluster for an mbuf. Create one * and associate it with the payload that's in the cluster. */ MPASS(clm != NULL); m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE); /* No bzero required */ if (m_init(m, M_NOWAIT, MT_DATA, fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE)) return (NULL); fl->mbuf_inlined++; m_extaddref(m, payload, blen, &clm->refcount, rxb_free, swz->zone, sd->cl); if (sd->nmbuf++ == 0) counter_u64_add(extfree_refs, 1); } else { /* * Grab an mbuf from zone_mbuf and associate it with the * payload in the cluster. */ m = fr_offset == 0 ? m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); if (m == NULL) return (NULL); fl->mbuf_allocated++; if (clm != NULL) { m_extaddref(m, payload, blen, &clm->refcount, rxb_free, swz->zone, sd->cl); if (sd->nmbuf++ == 0) counter_u64_add(extfree_refs, 1); } else { m_cljset(m, sd->cl, swz->type); sd->cl = NULL; /* consumed, not a recycle candidate */ } } if (fr_offset == 0) m->m_pkthdr.len = remaining; m->m_len = len; if (fl->flags & FL_BUF_PACKING) { fl->rx_offset += blen; MPASS(fl->rx_offset <= hwb->size); if (fl->rx_offset < hwb->size) return (m); /* without advancing the cidx */ } if (__predict_false(++fl->cidx % 8 == 0)) { uint16_t cidx = fl->cidx / 8; if (__predict_false(cidx == fl->sidx)) fl->cidx = cidx = 0; fl->hw_cidx = cidx; } fl->rx_offset = 0; return (m); } static struct mbuf * get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf) { struct mbuf *m0, *m, **pnext; u_int remaining; const u_int total = G_RSPD_LEN(len_newbuf); if (__predict_false(fl->flags & FL_BUF_RESUME)) { M_ASSERTPKTHDR(fl->m0); MPASS(fl->m0->m_pkthdr.len == total); MPASS(fl->remaining < total); m0 = fl->m0; pnext = fl->pnext; remaining = fl->remaining; fl->flags &= ~FL_BUF_RESUME; goto get_segment; } if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { fl->rx_offset = 0; if (__predict_false(++fl->cidx % 8 == 0)) { uint16_t cidx = fl->cidx / 8; if (__predict_false(cidx == fl->sidx)) fl->cidx = cidx = 0; fl->hw_cidx = cidx; } } /* * Payload starts at rx_offset in the current hw buffer. Its length is * 'len' and it may span multiple hw buffers. 
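 * The chain is assembled one scatter segment at a time; if an mbuf
 * allocation fails partway through, the chain built so far is parked on
 * the fl (FL_BUF_RESUME) and reassembly resumes on the next call.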
*/ m0 = get_scatter_segment(sc, fl, 0, total); if (m0 == NULL) return (NULL); remaining = total - m0->m_len; pnext = &m0->m_next; while (remaining > 0) { get_segment: MPASS(fl->rx_offset == 0); m = get_scatter_segment(sc, fl, total - remaining, remaining); if (__predict_false(m == NULL)) { fl->m0 = m0; fl->pnext = pnext; fl->remaining = remaining; fl->flags |= FL_BUF_RESUME; return (NULL); } *pnext = m; pnext = &m->m_next; remaining -= m->m_len; } *pnext = NULL; M_ASSERTPKTHDR(m0); return (m0); } static int t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) { struct sge_rxq *rxq = iq_to_rxq(iq); struct ifnet *ifp = rxq->ifp; struct adapter *sc = iq->adapter; const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); #if defined(INET) || defined(INET6) struct lro_ctrl *lro = &rxq->lro; #endif static const int sw_hashtype[4][2] = { {M_HASHTYPE_NONE, M_HASHTYPE_NONE}, {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6}, {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6}, {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6}, }; KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, rss->opcode)); m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; m0->m_len -= sc->params.sge.fl_pktshift; m0->m_data += sc->params.sge.fl_pktshift; m0->m_pkthdr.rcvif = ifp; M_HASHTYPE_SET(m0, sw_hashtype[rss->hash_type][rss->ipv6]); m0->m_pkthdr.flowid = be32toh(rss->hash_val); if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) { if (ifp->if_capenable & IFCAP_RXCSUM && cpl->l2info & htobe32(F_RXF_IP)) { m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); rxq->rxcsum++; } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && cpl->l2info & htobe32(F_RXF_IP6)) { m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | CSUM_PSEUDO_HDR); rxq->rxcsum++; } if (__predict_false(cpl->ip_frag)) m0->m_pkthdr.csum_data = be16toh(cpl->csum); else m0->m_pkthdr.csum_data = 0xffff; } if (cpl->vlan_ex) { m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); m0->m_flags |= M_VLANTAG; rxq->vlan_extraction++; } #if defined(INET) || defined(INET6) if (iq->flags & IQ_LRO_ENABLED) { if (sort_before_lro(lro)) { tcp_lro_queue_mbuf(lro, m0); return (0); /* queued for sort, then LRO */ } if (tcp_lro_rx(lro, m0, 0) == 0) return (0); /* queued for LRO */ } #endif ifp->if_input(ifp, m0); return (0); } /* * Must drain the wrq or make sure that someone else will. */ static void wrq_tx_drain(void *arg, int n) { struct sge_wrq *wrq = arg; struct sge_eq *eq = &wrq->eq; EQ_LOCK(eq); if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) drain_wrq_wr_list(wrq->adapter, wrq); EQ_UNLOCK(eq); } static void drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) { struct sge_eq *eq = &wrq->eq; u_int available, dbdiff; /* # of hardware descriptors */ u_int n; struct wrqe *wr; struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ EQ_LOCK_ASSERT_OWNED(eq); MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); wr = STAILQ_FIRST(&wrq->wr_list); MPASS(wr != NULL); /* Must be called with something useful to do */ MPASS(eq->pidx == eq->dbidx); dbdiff = 0; do { eq->cidx = read_hw_cidx(eq); if (eq->pidx == eq->cidx) available = eq->sidx - 1; else available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; MPASS(wr->wrq == wrq); n = howmany(wr->wr_len, EQ_ESIZE); if (available < n) break; dst = (void *)&eq->desc[eq->pidx]; if (__predict_true(eq->sidx - eq->pidx > n)) { /* Won't wrap, won't end exactly at the status page. 
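 * A WR that would end exactly at the status page takes the wrap branch
 * instead: there eq->pidx becomes n - (eq->sidx - eq->pidx), which is 0
 * in the end-exactly case, rather than being left equal to eq->sidx.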
*/ bcopy(&wr->wr[0], dst, wr->wr_len); eq->pidx += n; } else { int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; bcopy(&wr->wr[0], dst, first_portion); if (wr->wr_len > first_portion) { bcopy(&wr->wr[first_portion], &eq->desc[0], wr->wr_len - first_portion); } eq->pidx = n - (eq->sidx - eq->pidx); } wrq->tx_wrs_copied++; if (available < eq->sidx / 4 && atomic_cmpset_int(&eq->equiq, 0, 1)) { /* * XXX: This is not 100% reliable with some * types of WRs. But this is a very unusual * situation for an ofld/ctrl queue anyway. */ dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | F_FW_WR_EQUEQ); } dbdiff += n; if (dbdiff >= 16) { ring_eq_db(sc, eq, dbdiff); dbdiff = 0; } STAILQ_REMOVE_HEAD(&wrq->wr_list, link); free_wrqe(wr); MPASS(wrq->nwr_pending > 0); wrq->nwr_pending--; MPASS(wrq->ndesc_needed >= n); wrq->ndesc_needed -= n; } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); if (dbdiff) ring_eq_db(sc, eq, dbdiff); } /* * Doesn't fail. Holds on to work requests it can't send right away. */ void t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) { #ifdef INVARIANTS struct sge_eq *eq = &wrq->eq; #endif EQ_LOCK_ASSERT_OWNED(eq); MPASS(wr != NULL); MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); MPASS((wr->wr_len & 0x7) == 0); STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); wrq->nwr_pending++; wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) return; /* commit_wrq_wr will drain wr_list as well. */ drain_wrq_wr_list(sc, wrq); /* Doorbell must have caught up to the pidx. */ MPASS(eq->pidx == eq->dbidx); } void t4_update_fl_bufsize(struct ifnet *ifp) { struct vi_info *vi = ifp->if_softc; struct adapter *sc = vi->pi->adapter; struct sge_rxq *rxq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif struct sge_fl *fl; int i, maxp, mtu = ifp->if_mtu; maxp = mtu_to_max_payload(sc, mtu, 0); for_each_rxq(vi, i, rxq) { fl = &rxq->fl; FL_LOCK(fl); find_best_refill_source(sc, fl, maxp); FL_UNLOCK(fl); } #ifdef TCP_OFFLOAD maxp = mtu_to_max_payload(sc, mtu, 1); for_each_ofld_rxq(vi, i, ofld_rxq) { fl = &ofld_rxq->fl; FL_LOCK(fl); find_best_refill_source(sc, fl, maxp); FL_UNLOCK(fl); } #endif } static inline int mbuf_nsegs(struct mbuf *m) { M_ASSERTPKTHDR(m); KASSERT(m->m_pkthdr.l5hlen > 0, ("%s: mbuf %p missing information on # of segments.", __func__, m)); return (m->m_pkthdr.l5hlen); } static inline void set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) { M_ASSERTPKTHDR(m); m->m_pkthdr.l5hlen = nsegs; } static inline int mbuf_len16(struct mbuf *m) { int n; M_ASSERTPKTHDR(m); n = m->m_pkthdr.PH_loc.eight[0]; MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); return (n); } static inline void set_mbuf_len16(struct mbuf *m, uint8_t len16) { M_ASSERTPKTHDR(m); m->m_pkthdr.PH_loc.eight[0] = len16; } static inline int needs_tso(struct mbuf *m) { M_ASSERTPKTHDR(m); return (m->m_pkthdr.csum_flags & CSUM_TSO); } static inline int needs_l3_csum(struct mbuf *m) { M_ASSERTPKTHDR(m); return (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)); } static inline int needs_l4_csum(struct mbuf *m) { M_ASSERTPKTHDR(m); return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)); } static inline int needs_vlan_insertion(struct mbuf *m) { M_ASSERTPKTHDR(m); return (m->m_flags & M_VLANTAG); } static void * m_advance(struct mbuf **pm, int *poffset, int len) { struct mbuf *m = *pm; int offset = *poffset; uintptr_t p = 0; MPASS(len > 0); for (;;) { if (offset + len < m->m_len) { offset += len; p = mtod(m, uintptr_t) + offset; break; } len 
-= m->m_len - offset; m = m->m_next; offset = 0; MPASS(m != NULL); } *poffset = offset; *pm = m; return ((void *)p); }
/* * Can deal with empty mbufs in the chain that have m_len = 0, but the chain * must have at least one mbuf that's not empty. */ static inline int count_mbuf_nsegs(struct mbuf *m) { vm_paddr_t lastb, next; vm_offset_t va; int len, nsegs; MPASS(m != NULL); nsegs = 0; lastb = 0; for (; m; m = m->m_next) { len = m->m_len; if (__predict_false(len == 0)) continue; va = mtod(m, vm_offset_t); next = pmap_kextract(va); nsegs += sglist_count(m->m_data, len); if (lastb + 1 == next) nsegs--; lastb = pmap_kextract(va + len - 1); } MPASS(nsegs > 0); return (nsegs); }
/* * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: * a) caller can assume it's been freed if this function returns with an error. * b) it may get defragged up if the gather list is too long for the hardware. */ int parse_pkt(struct adapter *sc, struct mbuf **mp) { struct mbuf *m0 = *mp, *m; int rc, nsegs, defragged = 0, offset; struct ether_header *eh; void *l3hdr; #if defined(INET) || defined(INET6) struct tcphdr *tcp; #endif uint16_t eh_type; M_ASSERTPKTHDR(m0); if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { rc = EINVAL; fail: m_freem(m0); *mp = NULL; return (rc); } restart: /* * First count the number of gather list segments in the payload. * Defrag the mbuf if nsegs exceeds the hardware limit. */ M_ASSERTPKTHDR(m0); MPASS(m0->m_pkthdr.len > 0); nsegs = count_mbuf_nsegs(m0); if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { rc = EFBIG; goto fail; } *mp = m0 = m; /* update caller's copy after defrag */ goto restart; } if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) { m0 = m_pullup(m0, m0->m_pkthdr.len); if (m0 == NULL) { /* Should have left well enough alone. */ rc = EFBIG; goto fail; } *mp = m0; /* update caller's copy after pullup */ goto restart; } set_mbuf_nsegs(m0, nsegs); if (sc->flags & IS_VF) set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0))); else set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); if (!needs_tso(m0) && !(sc->flags & IS_VF && (needs_l3_csum(m0) || needs_l4_csum(m0)))) return (0); m = m0; eh = mtod(m, struct ether_header *); eh_type = ntohs(eh->ether_type); if (eh_type == ETHERTYPE_VLAN) { struct ether_vlan_header *evh = (void *)eh; eh_type = ntohs(evh->evl_proto); m0->m_pkthdr.l2hlen = sizeof(*evh); } else m0->m_pkthdr.l2hlen = sizeof(*eh); offset = 0; l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); switch (eh_type) { #ifdef INET6 case ETHERTYPE_IPV6: { struct ip6_hdr *ip6 = l3hdr; MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP); m0->m_pkthdr.l3hlen = sizeof(*ip6); break; } #endif #ifdef INET case ETHERTYPE_IP: { struct ip *ip = l3hdr; m0->m_pkthdr.l3hlen = ip->ip_hl * 4; break; } #endif default: panic("%s: ethertype 0x%04x unknown.  if_cxgbe must be compiled" " with the same INET/INET6 options as the kernel.", __func__, eh_type); } #if defined(INET) || defined(INET6) if (needs_tso(m0)) { tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); m0->m_pkthdr.l4hlen = tcp->th_off * 4; } #endif MPASS(m0 == *mp); return (0); }
void * start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) { struct sge_eq *eq = &wrq->eq; struct adapter *sc = wrq->adapter; int ndesc, available; struct wrqe *wr; void *w; MPASS(len16 > 0); ndesc = howmany(len16, EQ_ESIZE / 16); MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); EQ_LOCK(eq); if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) drain_wrq_wr_list(sc, wrq); if (!STAILQ_EMPTY(&wrq->wr_list)) { slowpath: EQ_UNLOCK(eq); wr = alloc_wrqe(len16 * 16, wrq); if (__predict_false(wr == NULL)) return (NULL); cookie->pidx = -1; cookie->ndesc = ndesc; return (&wr->wr); } eq->cidx = read_hw_cidx(eq); if (eq->pidx == eq->cidx) available = eq->sidx - 1; else available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; if (available < ndesc) goto slowpath; cookie->pidx = eq->pidx; cookie->ndesc = ndesc; TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); w = &eq->desc[eq->pidx]; IDXINCR(eq->pidx, ndesc, eq->sidx); if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { w = &wrq->ss[0]; wrq->ss_pidx = cookie->pidx; wrq->ss_len = len16 * 16; } EQ_UNLOCK(eq); return (w); }
void commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) { struct sge_eq *eq = &wrq->eq; struct adapter *sc = wrq->adapter; int ndesc, pidx; struct wrq_cookie *prev, *next; if (cookie->pidx == -1) { struct wrqe *wr = __containerof(w, struct wrqe, wr); t4_wrq_tx(sc, wr); return; } if (__predict_false(w == &wrq->ss[0])) { int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; MPASS(wrq->ss_len > n); /* WR had better wrap around. */ bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); wrq->tx_wrs_ss++; } else wrq->tx_wrs_direct++; EQ_LOCK(eq); ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ pidx = cookie->pidx; MPASS(pidx >= 0 && pidx < eq->sidx); prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); next = TAILQ_NEXT(cookie, link); if (prev == NULL) { MPASS(pidx == eq->dbidx); if (next == NULL || ndesc >= 16) { int available; struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ /* * Note that the WR via which we'll request tx updates * is at pidx and not eq->pidx, which has moved on * already. */ dst = (void *)&eq->desc[pidx]; available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; if (available < eq->sidx / 4 && atomic_cmpset_int(&eq->equiq, 0, 1)) { /* * XXX: This is not 100% reliable with some * types of WRs. But this is a very unusual * situation for an ofld/ctrl queue anyway. */ dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | F_FW_WR_EQUEQ); } ring_eq_db(wrq->adapter, eq, ndesc); } else { MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); next->pidx = pidx; next->ndesc += ndesc; } } else { MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); prev->ndesc += ndesc; } TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) drain_wrq_wr_list(sc, wrq); #ifdef INVARIANTS if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { /* Doorbell must have caught up to the pidx.
*/ MPASS(wrq->eq.pidx == wrq->eq.dbidx); } #endif EQ_UNLOCK(eq); } static u_int can_resume_eth_tx(struct mp_ring *r) { struct sge_eq *eq = r->cookie; return (total_available_tx_desc(eq) > eq->sidx / 8); } static inline int cannot_use_txpkts(struct mbuf *m) { /* maybe put a GL limit too, to avoid silliness? */ return (needs_tso(m)); } static inline int discard_tx(struct sge_eq *eq) { return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); } /* * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to * be consumed. Return the actual number consumed. 0 indicates a stall. */ static u_int eth_tx(struct mp_ring *r, u_int cidx, u_int pidx) { struct sge_txq *txq = r->cookie; struct sge_eq *eq = &txq->eq; struct ifnet *ifp = txq->ifp; struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; u_int total, remaining; /* # of packets */ u_int available, dbdiff; /* # of hardware descriptors */ u_int n, next_cidx; struct mbuf *m0, *tail; struct txpkts txp; struct fw_eth_tx_pkts_wr *wr; /* any fw WR struct will do */ remaining = IDXDIFF(pidx, cidx, r->size); MPASS(remaining > 0); /* Must not be called without work to do. */ total = 0; TXQ_LOCK(txq); if (__predict_false(discard_tx(eq))) { while (cidx != pidx) { m0 = r->items[cidx]; m_freem(m0); if (++cidx == r->size) cidx = 0; } reclaim_tx_descs(txq, 2048); total = remaining; goto done; } /* How many hardware descriptors do we have readily available. */ if (eq->pidx == eq->cidx) available = eq->sidx - 1; else available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx); while (remaining > 0) { m0 = r->items[cidx]; M_ASSERTPKTHDR(m0); MPASS(m0->m_nextpkt == NULL); if (available < SGE_MAX_WR_NDESC) { available += reclaim_tx_descs(txq, 64); if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16)) break; /* out of descriptors */ } next_cidx = cidx + 1; if (__predict_false(next_cidx == r->size)) next_cidx = 0; wr = (void *)&eq->desc[eq->pidx]; if (sc->flags & IS_VF) { total++; remaining--; ETHER_BPF_MTAP(ifp, m0); n = write_txpkt_vm_wr(sc, txq, (void *)wr, m0, available); } else if (remaining > 1 && try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) { /* pkts at cidx, next_cidx should both be in txp. 
*/ MPASS(txp.npkt == 2); tail = r->items[next_cidx]; MPASS(tail->m_nextpkt == NULL); ETHER_BPF_MTAP(ifp, m0); ETHER_BPF_MTAP(ifp, tail); m0->m_nextpkt = tail; if (__predict_false(++next_cidx == r->size)) next_cidx = 0; while (next_cidx != pidx) { if (add_to_txpkts(r->items[next_cidx], &txp, available) != 0) break; tail->m_nextpkt = r->items[next_cidx]; tail = tail->m_nextpkt; ETHER_BPF_MTAP(ifp, tail); if (__predict_false(++next_cidx == r->size)) next_cidx = 0; } n = write_txpkts_wr(txq, wr, m0, &txp, available); total += txp.npkt; remaining -= txp.npkt; } else { total++; remaining--; ETHER_BPF_MTAP(ifp, m0); n = write_txpkt_wr(txq, (void *)wr, m0, available); } MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC); available -= n; dbdiff += n; IDXINCR(eq->pidx, n, eq->sidx); if (total_available_tx_desc(eq) < eq->sidx / 4 && atomic_cmpset_int(&eq->equiq, 0, 1)) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | F_FW_WR_EQUEQ); eq->equeqidx = eq->pidx; } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); eq->equeqidx = eq->pidx; } if (dbdiff >= 16 && remaining >= 4) { ring_eq_db(sc, eq, dbdiff); available += reclaim_tx_descs(txq, 4 * dbdiff); dbdiff = 0; } cidx = next_cidx; } if (dbdiff != 0) { ring_eq_db(sc, eq, dbdiff); reclaim_tx_descs(txq, 32); } done: TXQ_UNLOCK(txq); return (total); } static inline void init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, int qsize) { KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, ("%s: bad tmr_idx %d", __func__, tmr_idx)); KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ ("%s: bad pktc_idx %d", __func__, pktc_idx)); iq->flags = 0; iq->adapter = sc; iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); iq->intr_pktc_idx = SGE_NCOUNTERS - 1; if (pktc_idx >= 0) { iq->intr_params |= F_QINTR_CNT_EN; iq->intr_pktc_idx = pktc_idx; } iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; } static inline void init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) { fl->qsize = qsize; fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; strlcpy(fl->lockname, name, sizeof(fl->lockname)); if (sc->flags & BUF_PACKING_OK && ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ fl->flags |= FL_BUF_PACKING; find_best_refill_source(sc, fl, maxp); find_safe_refill_source(sc, fl); } static inline void init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan, uint16_t iqid, char *name) { KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); eq->flags = eqtype & EQ_TYPEMASK; eq->tx_chan = tx_chan; eq->iqid = iqid; eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; strlcpy(eq->lockname, name, sizeof(eq->lockname)); } static int alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, bus_dmamap_t *map, bus_addr_t *pa, void **va) { int rc; rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); if (rc != 0) { device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); goto done; } rc = bus_dmamem_alloc(*tag, va, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); if (rc != 0) { device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); goto done; } rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); if (rc != 0) { device_printf(sc->dev, "cannot load DMA map: %d\n", rc); goto 
done; } done: if (rc) free_ring(sc, *tag, *map, *pa, *va); return (rc); } static int free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, bus_addr_t pa, void *va) { if (pa) bus_dmamap_unload(tag, map); if (va) bus_dmamem_free(tag, va, map); if (tag) bus_dma_tag_destroy(tag); return (0); } /* * Allocates the ring for an ingress queue and an optional freelist. If the * freelist is specified it will be allocated and then associated with the * ingress queue. * * Returns errno on failure. Resources allocated up to that point may still be * allocated. Caller is responsible for cleanup in case this function fails. * * If the ingress queue will take interrupts directly then the intr_idx * specifies the vector, starting from 0. -1 means the interrupts for this * queue should be forwarded to the fwq. */ static int alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl, int intr_idx, int cong) { int rc, i, cntxt_id; size_t len; struct fw_iq_cmd c; struct port_info *pi = vi->pi; struct adapter *sc = iq->adapter; struct sge_params *sp = &sc->params.sge; __be32 v = 0; len = iq->qsize * IQ_ESIZE; rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, (void **)&iq->desc); if (rc != 0) return (rc); bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | V_FW_IQ_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | FW_LEN16(c)); /* Special handling for firmware event queue */ if (iq == &sc->sge.fwq) v |= F_FW_IQ_CMD_IQASYNCH; if (intr_idx < 0) { /* Forwarded interrupts, all headed to fwq */ v |= F_FW_IQ_CMD_IQANDST; v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); } else { KASSERT(intr_idx < sc->intr_count, ("%s: invalid direct intr_idx %d", __func__, intr_idx)); v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); } c.type_to_iqandstindex = htobe32(v | V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | V_FW_IQ_CMD_VIID(vi->viid) | V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | F_FW_IQ_CMD_IQGTSMODE | V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); c.iqsize = htobe16(iq->qsize); c.iqaddr = htobe64(iq->ba); if (cong >= 0) c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); if (fl) { mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); len = fl->qsize * EQ_ESIZE; rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, &fl->ba, (void **)&fl->desc); if (rc) return (rc); /* Allocate space for one software descriptor per buffer. */ rc = alloc_fl_sdesc(fl); if (rc != 0) { device_printf(sc->dev, "failed to setup fl software descriptors: %d\n", rc); return (rc); } if (fl->flags & FL_BUF_PACKING) { fl->lowat = roundup2(sp->fl_starve_threshold2, 8); fl->buf_boundary = sp->pack_boundary; } else { fl->lowat = roundup2(sp->fl_starve_threshold, 8); fl->buf_boundary = 16; } if (fl_pad && fl->buf_boundary < sp->pad_boundary) fl->buf_boundary = sp->pad_boundary; c.iqns_to_fl0congen |= htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 0)); if (cong >= 0) { c.iqns_to_fl0congen |= htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | F_FW_IQ_CMD_FL0CONGEN); } c.fl0dcaen_to_fl0cidxfthresh = htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 
X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) | V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); c.fl0size = htobe16(fl->qsize); c.fl0addr = htobe64(fl->ba); } rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(sc->dev, "failed to create ingress queue: %d\n", rc); return (rc); } iq->cidx = 0; iq->gen = F_RSPD_GEN; iq->intr_next = iq->intr_params; iq->cntxt_id = be16toh(c.iqid); iq->abs_id = be16toh(c.physiqid); iq->flags |= IQ_ALLOCATED; cntxt_id = iq->cntxt_id - sc->sge.iq_start; if (cntxt_id >= sc->sge.niq) { panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.niq - 1); } sc->sge.iqmap[cntxt_id] = iq; if (fl) { u_int qid; iq->flags |= IQ_HAS_FL; fl->cntxt_id = be16toh(c.fl0id); fl->pidx = fl->cidx = 0; cntxt_id = fl->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) { panic("%s: fl->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); } sc->sge.eqmap[cntxt_id] = (void *)fl; qid = fl->cntxt_id; if (isset(&sc->doorbells, DOORBELL_UDB)) { uint32_t s_qpp = sc->params.sge.eq_s_qpp; uint32_t mask = (1 << s_qpp) - 1; volatile uint8_t *udb; udb = sc->udbs_base + UDBS_DB_OFFSET; udb += (qid >> s_qpp) << PAGE_SHIFT; qid &= mask; if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { udb += qid << UDBS_SEG_SHIFT; qid = 0; } fl->udb = (volatile void *)udb; } fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; FL_LOCK(fl); /* Enough to make sure the SGE doesn't think it's starved */ refill_fl(sc, fl, fl->lowat); FL_UNLOCK(fl); } if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) { uint32_t param, val; param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); if (cong == 0) val = 1 << 19; else { val = 2 << 19; for (i = 0; i < 4; i++) { if (cong & (1 << i)) val |= 1 << (i << 2); } } rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc != 0) { /* report error but carry on */ device_printf(sc->dev, "failed to set congestion manager context for " "ingress queue %d: %d\n", iq->cntxt_id, rc); } } /* Enable IQ interrupts */ atomic_store_rel_int(&iq->state, IQS_IDLE); t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id)); return (0); } static int free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl) { int rc; struct adapter *sc = iq->adapter; device_t dev; if (sc == NULL) return (0); /* nothing to do */ dev = vi ? vi->dev : sc->dev; if (iq->flags & IQ_ALLOCATED) { rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, fl ? 
fl->cntxt_id : 0xffff, 0xffff); if (rc != 0) { device_printf(dev, "failed to free queue %p: %d\n", iq, rc); return (rc); } iq->flags &= ~IQ_ALLOCATED; } free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); bzero(iq, sizeof(*iq)); if (fl) { free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, fl->desc); if (fl->sdesc) free_fl_sdesc(sc, fl); if (mtx_initialized(&fl->fl_lock)) mtx_destroy(&fl->fl_lock); bzero(fl, sizeof(*fl)); } return (0); } static void add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, struct sge_iq *iq) { struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", CTLTYPE_INT | CTLFLAG_RD, &iq->abs_id, 0, sysctl_uint16, "I", "absolute id of the queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &iq->cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &iq->cidx, 0, sysctl_uint16, "I", "consumer index"); } static void add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, struct sge_fl *fl) { struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, "freelist"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &fl->ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, "desc ring size in bytes"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the freelist"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, fl_pad ? 1 : 0, "padding enabled"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 0, "consumer index"); if (fl->flags & FL_BUF_PACKING) { SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); } SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 0, "producer index"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); } static int alloc_fwq(struct adapter *sc) { int rc, intr_idx; struct sge_iq *fwq = &sc->sge.fwq; struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); if (sc->flags & IS_VF) intr_idx = 0; else intr_idx = sc->intr_count > 1 ? 
1 : 0; rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1); if (rc != 0) { device_printf(sc->dev, "failed to create firmware event queue: %d\n", rc); return (rc); } oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, NULL, "firmware event queue"); add_iq_sysctls(&sc->ctx, oid, fwq); return (0); } static int free_fwq(struct adapter *sc) { return free_iq_fl(NULL, &sc->sge.fwq, NULL); } static int alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx, struct sysctl_oid *oid) { int rc; char name[16]; struct sysctl_oid_list *children; snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev), idx); init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan, sc->sge.fwq.cntxt_id, name); children = SYSCTL_CHILDREN(oid); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "ctrl queue"); rc = alloc_wrq(sc, NULL, ctrlq, oid); return (rc); } int tnl_cong(struct port_info *pi, int drop) { if (drop == -1) return (-1); else if (drop == 1) return (0); else return (pi->rx_e_chan_map); } static int alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx, struct sysctl_oid *oid) { int rc; struct adapter *sc = vi->pi->adapter; struct sysctl_oid_list *children; char name[16]; rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(vi->pi, cong_drop)); if (rc != 0) return (rc); if (idx == 0) sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; else KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, ("iq_base mismatch")); KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, ("PF with non-zero iq_base")); /* * The freelist is just barely above the starvation threshold right now, * fill it up a bit more. */ FL_LOCK(&rxq->fl); refill_fl(sc, &rxq->fl, 128); FL_UNLOCK(&rxq->fl); #if defined(INET) || defined(INET6) rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs); if (rc != 0) return (rc); MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */ if (vi->ifp->if_capenable & IFCAP_LRO) rxq->iq.flags |= IQ_LRO_ENABLED; #endif rxq->ifp = vi->ifp; children = SYSCTL_CHILDREN(oid); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "rx queue"); children = SYSCTL_CHILDREN(oid); add_iq_sysctls(&vi->ctx, oid, &rxq->iq); #if defined(INET) || defined(INET6) SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, &rxq->lro.lro_queued, 0, NULL); SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, &rxq->lro.lro_flushed, 0, NULL); #endif SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, &rxq->rxcsum, "# of times hardware assisted with checksum"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction", CTLFLAG_RD, &rxq->vlan_extraction, "# of times hardware extracted 802.1Q tag"); add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl); return (rc); } static int free_rxq(struct vi_info *vi, struct sge_rxq *rxq) { int rc; #if defined(INET) || defined(INET6) if (rxq->lro.ifp) { tcp_lro_free(&rxq->lro); rxq->lro.ifp = NULL; } #endif rc = free_iq_fl(vi, &rxq->iq, &rxq->fl); if (rc == 0) bzero(rxq, sizeof(*rxq)); return (rc); } #ifdef TCP_OFFLOAD static int alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, int intr_idx, int idx, struct sysctl_oid *oid) { struct port_info *pi = vi->pi; int rc; struct sysctl_oid_list *children; char name[16]; rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0); if (rc != 0) return 
(rc); children = SYSCTL_CHILDREN(oid); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "rx queue"); add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq); add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl); return (rc); } static int free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq) { int rc; rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl); if (rc == 0) bzero(ofld_rxq, sizeof(*ofld_rxq)); return (rc); } #endif #ifdef DEV_NETMAP static int alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, int idx, struct sysctl_oid *oid) { int rc; struct sysctl_oid_list *children; struct sysctl_ctx_list *ctx; char name[16]; size_t len; struct adapter *sc = vi->pi->adapter; struct netmap_adapter *na = NA(vi->ifp); MPASS(na != NULL); len = vi->qsize_rxq * IQ_ESIZE; rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); if (rc != 0) return (rc); len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); if (rc != 0) return (rc); nm_rxq->vi = vi; nm_rxq->nid = idx; nm_rxq->iq_cidx = 0; nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; nm_rxq->iq_gen = F_RSPD_GEN; nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; nm_rxq->fl_sidx = na->num_rx_desc; nm_rxq->intr_idx = intr_idx; ctx = &vi->ctx; children = SYSCTL_CHILDREN(oid); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "rx queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, "I", "absolute id of the queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", "consumer index"); children = SYSCTL_CHILDREN(oid); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, "freelist"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the freelist"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &nm_rxq->fl_cidx, 0, "consumer index"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &nm_rxq->fl_pidx, 0, "producer index"); return (rc); } static int free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) { struct adapter *sc = vi->pi->adapter; free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, nm_rxq->iq_desc); free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, nm_rxq->fl_desc); return (0); } static int alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx, struct sysctl_oid *oid) { int rc; size_t len; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct netmap_adapter *na = NA(vi->ifp); char name[16]; struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, &nm_txq->ba, (void **)&nm_txq->desc); if (rc) return (rc); nm_txq->pidx = nm_txq->cidx = 0; nm_txq->sidx = na->num_tx_desc; nm_txq->nid = idx; nm_txq->iqidx = iqidx; nm_txq->cpl_ctrl0 
= htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | - V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) | - V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) | - V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid))); + V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | + V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "netmap tx queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, &nm_txq->cntxt_id, 0, "SGE context id of the queue"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", "consumer index"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", "producer index"); return (rc); } static int free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) { struct adapter *sc = vi->pi->adapter; free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, nm_txq->desc); return (0); } #endif /* * Returns a reasonable automatic cidx flush threshold for a given queue size. */ static u_int qsize_to_fthresh(int qsize) { u_int fthresh; while (!powerof2(qsize)) qsize++; fthresh = ilog2(qsize); if (fthresh > X_CIDXFLUSHTHRESH_128) fthresh = X_CIDXFLUSHTHRESH_128; return (fthresh); } static int ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) { int rc, cntxt_id; struct fw_eq_ctrl_cmd c; int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | V_FW_EQ_CTRL_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); c.physeqid_pkd = htobe32(0); c.fetchszm_to_iqid = htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); c.dcaen_to_eqsize = htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); c.eqaddr = htobe64(eq->ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(sc->dev, "failed to create control queue %d: %d\n", eq->tx_chan, rc); return (rc); } eq->flags |= EQ_ALLOCATED; eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); cntxt_id = eq->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); sc->sge.eqmap[cntxt_id] = eq; return (rc); } static int eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) { int rc, cntxt_id; struct fw_eq_eth_cmd c; int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | V_FW_EQ_ETH_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); c.fetchszm_to_iqid = htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 
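
/*
 * Illustrative sketch (not driver code): qsize_to_fthresh above rounds the
 * queue size up to a power of 2, takes log2, and clamps to the largest
 * encodable cidx flush threshold.  Standalone equivalent, assuming the
 * X_CIDXFLUSHTHRESH_128 encoding is 7 (thresholds are powers of two,
 * 128 == 1 << 7), as the macro name suggests:
 */
static unsigned int
qsize_to_fthresh_model(unsigned int qsize)
{
        unsigned int fthresh = 0;

        while ((qsize & (qsize - 1)) != 0)      /* !powerof2(qsize) */
                qsize++;
        while ((1u << (fthresh + 1)) <= qsize)  /* fthresh = ilog2(qsize) */
                fthresh++;
        return (fthresh > 7 ? 7 : fthresh);
}
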
c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | V_FW_EQ_ETH_CMD_EQSIZE(qsize)); c.eqaddr = htobe64(eq->ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(vi->dev, "failed to create Ethernet egress queue: %d\n", rc); return (rc); } eq->flags |= EQ_ALLOCATED; eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); cntxt_id = eq->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); sc->sge.eqmap[cntxt_id] = eq; return (rc); } #ifdef TCP_OFFLOAD static int ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) { int rc, cntxt_id; struct fw_eq_ofld_cmd c; int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; bzero(&c, sizeof(c)); c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | V_FW_EQ_OFLD_CMD_VFN(0)); c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); c.fetchszm_to_iqid = htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); c.dcaen_to_eqsize = htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); c.eqaddr = htobe64(eq->ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(vi->dev, "failed to create egress queue for TCP offload: %d\n", rc); return (rc); } eq->flags |= EQ_ALLOCATED; eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); cntxt_id = eq->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); sc->sge.eqmap[cntxt_id] = eq; return (rc); } #endif static int alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) { int rc, qsize; size_t len; mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; len = qsize * EQ_ESIZE; rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, &eq->ba, (void **)&eq->desc); if (rc) return (rc); eq->pidx = eq->cidx = eq->dbidx = 0; /* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. 
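
/*
 * Illustrative sketch (not driver code): every FW_EQ_*_CMD above is built the
 * same way: zero the command struct, OR together V_*() shifted fields and F_*
 * flag bits, convert to big endian, and negate the mailbox return so callers
 * see a positive errno.  The S_/M_/V_/G_/F_ macro shape, with a made-up FOO
 * field and BAR flag:
 */
#include <stdint.h>

#define S_FOO           16
#define M_FOO           0xffU
#define V_FOO(x)        ((uint32_t)(x) << S_FOO)
#define G_FOO(x)        (((x) >> S_FOO) & M_FOO)
#define F_BAR           (1U << 31)

static uint32_t
pack_cmd_word(unsigned int foo)
{
        return (V_FOO(foo & M_FOO) | F_BAR);    /* caller then htobe32()s it */
}
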
*/ eq->equeqidx = 0; eq->doorbells = sc->doorbells; switch (eq->flags & EQ_TYPEMASK) { case EQ_CTRL: rc = ctrl_eq_alloc(sc, eq); break; case EQ_ETH: rc = eth_eq_alloc(sc, vi, eq); break; #ifdef TCP_OFFLOAD case EQ_OFLD: rc = ofld_eq_alloc(sc, vi, eq); break; #endif default: panic("%s: invalid eq type %d.", __func__, eq->flags & EQ_TYPEMASK); } if (rc != 0) { device_printf(sc->dev, "failed to allocate egress queue(%d): %d\n", eq->flags & EQ_TYPEMASK, rc); } if (isset(&eq->doorbells, DOORBELL_UDB) || isset(&eq->doorbells, DOORBELL_UDBWC) || isset(&eq->doorbells, DOORBELL_WCWR)) { uint32_t s_qpp = sc->params.sge.eq_s_qpp; uint32_t mask = (1 << s_qpp) - 1; volatile uint8_t *udb; udb = sc->udbs_base + UDBS_DB_OFFSET; udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ eq->udb_qid = eq->cntxt_id & mask; /* id in page */ if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) clrbit(&eq->doorbells, DOORBELL_WCWR); else { udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ eq->udb_qid = 0; } eq->udb = (volatile void *)udb; } return (rc); } static int free_eq(struct adapter *sc, struct sge_eq *eq) { int rc; if (eq->flags & EQ_ALLOCATED) { switch (eq->flags & EQ_TYPEMASK) { case EQ_CTRL: rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); break; case EQ_ETH: rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); break; #ifdef TCP_OFFLOAD case EQ_OFLD: rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); break; #endif default: panic("%s: invalid eq type %d.", __func__, eq->flags & EQ_TYPEMASK); } if (rc != 0) { device_printf(sc->dev, "failed to free egress queue (%d): %d\n", eq->flags & EQ_TYPEMASK, rc); return (rc); } eq->flags &= ~EQ_ALLOCATED; } free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); if (mtx_initialized(&eq->eq_lock)) mtx_destroy(&eq->eq_lock); bzero(eq, sizeof(*eq)); return (0); } static int alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq, struct sysctl_oid *oid) { int rc; struct sysctl_ctx_list *ctx = vi ? 
&vi->ctx : &sc->ctx; struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); rc = alloc_eq(sc, vi, &wrq->eq); if (rc) return (rc); wrq->adapter = sc; TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); TAILQ_INIT(&wrq->incomplete_wrs); STAILQ_INIT(&wrq->wr_list); wrq->nwr_pending = 0; wrq->ndesc_needed = 0; SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &wrq->eq.ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len, "desc ring size in bytes"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", "consumer index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", "producer index"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, wrq->eq.sidx, "status page index"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, &wrq->tx_wrs_direct, "# of work requests (direct)"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, &wrq->tx_wrs_copied, "# of work requests (copied)"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD, &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); return (rc); } static int free_wrq(struct adapter *sc, struct sge_wrq *wrq) { int rc; rc = free_eq(sc, &wrq->eq); if (rc) return (rc); bzero(wrq, sizeof(*wrq)); return (0); } static int alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx, struct sysctl_oid *oid) { int rc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct sge_eq *eq = &txq->eq; char name[16]; struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, M_CXGBE, M_WAITOK); if (rc != 0) { device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); return (rc); } rc = alloc_eq(sc, vi, eq); if (rc != 0) { mp_ring_free(txq->r); txq->r = NULL; return (rc); } /* Can't fail after this point. 
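
/*
 * Illustrative sketch (not driver code): alloc_txq above orders its work so
 * every step that can fail happens first, unwinding what was already
 * allocated, and irreversible initialization only starts after the "can't
 * fail after this point" line.  The shape, with hypothetical resources a, b:
 */
#include <stdlib.h>

struct two_things { void *a, *b; };

static int
two_things_init(struct two_things *t)
{
        t->a = malloc(64);      /* fallible step 1 */
        if (t->a == NULL)
                return (-1);
        t->b = malloc(64);      /* fallible step 2 */
        if (t->b == NULL) {
                free(t->a);     /* unwind step 1 */
                t->a = NULL;
                return (-1);
        }
        /* Can't fail after this point. */
        return (0);
}
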
*/ if (idx == 0) sc->sge.eq_base = eq->abs_id - eq->cntxt_id; else KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, ("eq_base mismatch")); KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, ("PF with non-zero eq_base")); TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); txq->ifp = vi->ifp; txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); if (sc->flags & IS_VF) txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | V_TXPKT_INTF(pi->tx_chan)); else txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | - V_TXPKT_INTF(pi->tx_chan) | - V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) | - V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) | - V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid))); + V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | + V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); txq->tc_idx = -1; txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, M_ZERO | M_WAITOK); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "tx queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &eq->ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, "desc ring size in bytes"); SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, &eq->abs_id, 0, "absolute id of the queue"); SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, &eq->cntxt_id, 0, "SGE context id of the queue"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", "consumer index"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", "producer index"); SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, eq->sidx, "status page index"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc", CTLTYPE_INT | CTLFLAG_RW, vi, idx, sysctl_tc, "I", "traffic class (-1 means none)"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, &txq->txcsum, "# of times hardware assisted with checksum"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion", CTLFLAG_RD, &txq->vlan_insertion, "# of times hardware inserted 802.1Q tag"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, &txq->tso_wrs, "# of TSO work requests"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, &txq->imm_wrs, "# of work requests with immediate data"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, &txq->sgl_wrs, "# of work requests with direct SGL"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs", CTLFLAG_RD, &txq->txpkts0_wrs, "# of txpkts (type 0) work requests"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs", CTLFLAG_RD, &txq->txpkts1_wrs, "# of txpkts (type 1) work requests"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", CTLFLAG_RD, &txq->txpkts0_pkts, "# of frames tx'd using type0 txpkts work requests"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", CTLFLAG_RD, &txq->txpkts1_pkts, "# of frames tx'd using type1 txpkts work requests"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues", CTLFLAG_RD, &txq->r->enqueues, "# of enqueues to the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, 
OID_AUTO, "r_drops", CTLFLAG_RD, &txq->r->drops, "# of drops in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts", CTLFLAG_RD, &txq->r->starts, "# of normal consumer starts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls", CTLFLAG_RD, &txq->r->stalls, "# of consumer stalls in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts", CTLFLAG_RD, &txq->r->restarts, "# of consumer restarts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications", CTLFLAG_RD, &txq->r->abdications, "# of consumer abdications in the mp_ring for this queue"); return (0); } static int free_txq(struct vi_info *vi, struct sge_txq *txq) { int rc; struct adapter *sc = vi->pi->adapter; struct sge_eq *eq = &txq->eq; rc = free_eq(sc, eq); if (rc) return (rc); sglist_free(txq->gl); free(txq->sdesc, M_CXGBE); mp_ring_free(txq->r); bzero(txq, sizeof(*txq)); return (0); } static void oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *ba = arg; KASSERT(nseg == 1, ("%s meant for single segment mappings only.", __func__)); *ba = error ? 0 : segs->ds_addr; } static inline void ring_fl_db(struct adapter *sc, struct sge_fl *fl) { uint32_t n, v; n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx); MPASS(n > 0); wmb(); v = fl->dbval | V_PIDX(n); if (fl->udb) *fl->udb = htole32(v); else t4_write_reg(sc, sc->sge_kdoorbell_reg, v); IDXINCR(fl->dbidx, n, fl->sidx); } /* * Fills up the freelist by allocating up to 'n' buffers. Buffers that are * recycled do not count towards this allocation budget. * * Returns non-zero to indicate that this freelist should be added to the list * of starving freelists. */ static int refill_fl(struct adapter *sc, struct sge_fl *fl, int n) { __be64 *d; struct fl_sdesc *sd; uintptr_t pa; caddr_t cl; struct cluster_layout *cll; struct sw_zone_info *swz; struct cluster_metadata *clm; uint16_t max_pidx; uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ FL_LOCK_ASSERT_OWNED(fl); /* * We always stop at the beginning of the hardware descriptor that's just * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, * which would mean an empty freelist to the chip. */ max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; if (fl->pidx == max_pidx * 8) return (0); d = &fl->desc[fl->pidx]; sd = &fl->sdesc[fl->pidx]; cll = &fl->cll_def; /* default layout */ swz = &sc->sge.sw_zone_info[cll->zidx]; while (n > 0) { if (sd->cl != NULL) { if (sd->nmbuf == 0) { /* * Fast recycle without involving any atomics on * the cluster's metadata (if the cluster has * metadata). This happens when all frames * received in the cluster were small enough to * fit within a single mbuf each. */ fl->cl_fast_recycled++; #ifdef INVARIANTS clm = cl_metadata(sc, fl, &sd->cll, sd->cl); if (clm != NULL) MPASS(clm->refcount == 1); #endif goto recycled_fast; } /* * Cluster is guaranteed to have metadata. Clusters * without metadata always take the fast recycle path * when they're recycled. 
*/ clm = cl_metadata(sc, fl, &sd->cll, sd->cl); MPASS(clm != NULL); if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { fl->cl_recycled++; counter_u64_add(extfree_rels, 1); goto recycled; } sd->cl = NULL; /* gave up my reference */ } MPASS(sd->cl == NULL); alloc: cl = uma_zalloc(swz->zone, M_NOWAIT); if (__predict_false(cl == NULL)) { if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || fl->cll_def.zidx == fl->cll_alt.zidx) break; /* fall back to the safe zone */ cll = &fl->cll_alt; swz = &sc->sge.sw_zone_info[cll->zidx]; goto alloc; } fl->cl_allocated++; n--; pa = pmap_kextract((vm_offset_t)cl); pa += cll->region1; sd->cl = cl; sd->cll = *cll; *d = htobe64(pa | cll->hwidx); clm = cl_metadata(sc, fl, cll, cl); if (clm != NULL) { recycled: #ifdef INVARIANTS clm->sd = sd; #endif clm->refcount = 1; } sd->nmbuf = 0; recycled_fast: d++; sd++; if (__predict_false(++fl->pidx % 8 == 0)) { uint16_t pidx = fl->pidx / 8; if (__predict_false(pidx == fl->sidx)) { fl->pidx = 0; pidx = 0; sd = fl->sdesc; d = fl->desc; } if (pidx == max_pidx) break; if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) ring_fl_db(sc, fl); } } if (fl->pidx / 8 != fl->dbidx) ring_fl_db(sc, fl); return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); } /* * Attempt to refill all starving freelists. */ static void refill_sfl(void *arg) { struct adapter *sc = arg; struct sge_fl *fl, *fl_temp; mtx_assert(&sc->sfl_lock, MA_OWNED); TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { FL_LOCK(fl); refill_fl(sc, fl, 64); if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { TAILQ_REMOVE(&sc->sfl, fl, link); fl->flags &= ~FL_STARVING; } FL_UNLOCK(fl); } if (!TAILQ_EMPTY(&sc->sfl)) callout_schedule(&sc->sfl_callout, hz / 5); } static int alloc_fl_sdesc(struct sge_fl *fl) { fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, M_ZERO | M_WAITOK); return (0); } static void free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) { struct fl_sdesc *sd; struct cluster_metadata *clm; struct cluster_layout *cll; int i; sd = fl->sdesc; for (i = 0; i < fl->sidx * 8; i++, sd++) { if (sd->cl == NULL) continue; cll = &sd->cll; clm = cl_metadata(sc, fl, cll, sd->cl); if (sd->nmbuf == 0) uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) { uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); counter_u64_add(extfree_rels, 1); } sd->cl = NULL; } free(fl->sdesc, M_CXGBE); fl->sdesc = NULL; } static inline void get_pkt_gl(struct mbuf *m, struct sglist *gl) { int rc; M_ASSERTPKTHDR(m); sglist_reset(gl); rc = sglist_append_mbuf(gl, m); if (__predict_false(rc != 0)) { panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " "with %d.", __func__, m, mbuf_nsegs(m), rc); } KASSERT(gl->sg_nseg == mbuf_nsegs(m), ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, mbuf_nsegs(m), gl->sg_nseg)); KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS), ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); } /* * len16 for a txpkt WR with a GL. Includes the firmware work request header. */ static inline u_int txpkt_len16(u_int nsegs, u_int tso) { u_int n; MPASS(nsegs > 0); nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); if (tso) n += sizeof(struct cpl_tx_pkt_lso_core); return (howmany(n, 16)); } /* * len16 for a txpkt_vm WR with a GL. 
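
/*
 * Illustrative sketch (not driver code): txpkt_len16 above, and the
 * txpkt_vm/txpkts0 variants that follow, share one SGL size formula: the
 * first segment rides in the ulptx_sgl header, and each remaining pair of
 * segments costs three 8-byte flits, plus one spill flit for an odd
 * remainder.  With the fixed header size passed in instead of taken from the
 * firmware structs:
 */
static unsigned int
wr_len16_model(unsigned int nsegs, unsigned int hdr_bytes)      /* nsegs >= 1 */
{
        unsigned int n = nsegs - 1;     /* first segment is in the SGL header */
        unsigned int bytes = hdr_bytes + 8 * ((3 * n) / 2 + (n & 1));

        return ((bytes + 15) / 16);     /* howmany(bytes, 16) */
}
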
Includes the firmware work * request header. */ static inline u_int txpkt_vm_len16(u_int nsegs, u_int tso) { u_int n; MPASS(nsegs > 0); nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct fw_eth_tx_pkt_vm_wr) + sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); if (tso) n += sizeof(struct cpl_tx_pkt_lso_core); return (howmany(n, 16)); } /* * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work * request header. */ static inline u_int txpkts0_len16(u_int nsegs) { u_int n; MPASS(nsegs > 0); nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); return (howmany(n, 16)); } /* * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work * request header. */ static inline u_int txpkts1_len16(void) { u_int n; n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); return (howmany(n, 16)); } static inline u_int imm_payload(u_int ndesc) { u_int n; n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - sizeof(struct cpl_tx_pkt_core); return (n); } /* * Write a VM txpkt WR for this packet to the hardware descriptors, update the * software descriptor, and advance the pidx. It is guaranteed that enough * descriptors are available. * * The return value is the # of hardware descriptors used. */ static u_int write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct fw_eth_tx_pkt_vm_wr *wr, struct mbuf *m0, u_int available) { struct sge_eq *eq = &txq->eq; struct tx_sdesc *txsd; struct cpl_tx_pkt_core *cpl; uint32_t ctrl; /* used in many unrelated places */ uint64_t ctrl1; int csum_type, len16, ndesc, pktlen, nsegs; caddr_t dst; TXQ_LOCK_ASSERT_OWNED(txq); M_ASSERTPKTHDR(m0); MPASS(available > 0 && available < eq->sidx); len16 = mbuf_len16(m0); nsegs = mbuf_nsegs(m0); pktlen = m0->m_pkthdr.len; ctrl = sizeof(struct cpl_tx_pkt_core); if (needs_tso(m0)) ctrl += sizeof(struct cpl_tx_pkt_lso_core); ndesc = howmany(len16, EQ_ESIZE / 16); MPASS(ndesc <= available); /* Firmware work request header */ MPASS(wr == (void *)&eq->desc[eq->pidx]); wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); ctrl = V_FW_WR_LEN16(len16); wr->equiq_to_len16 = htobe32(ctrl); wr->r3[0] = 0; wr->r3[1] = 0; /* * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci. * vlantci is ignored unless the ethtype is 0x8100, so it's * simpler to always copy it rather than making it * conditional. Also, it seems that we do not have to set * vlantci or fake the ethtype when doing VLAN tag insertion. 
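
/*
 * Illustrative sketch (not driver code): imm_payload above is how much packet
 * data fits directly in 'ndesc' descriptors once the work request and CPL
 * headers are taken out; write_txpkt_wr later compares the packet length
 * against it to choose immediate data over a gather list.  With the sizes as
 * parameters rather than the firmware structs:
 */
static unsigned int
imm_payload_model(unsigned int ndesc, unsigned int esize,
    unsigned int wr_hdr, unsigned int cpl_hdr)
{
        return (ndesc * esize - wr_hdr - cpl_hdr);
}
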
*/ m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst); csum_type = -1; if (needs_tso(m0)) { struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && m0->m_pkthdr.l4hlen > 0, ("%s: mbuf %p needs TSO but missing header lengths", __func__, m0)); ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) ctrl |= V_LSO_ETHHDR_LEN(1); if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) ctrl |= F_LSO_IPV6; lso->lso_ctrl = htobe32(ctrl); lso->ipid_ofst = htobe16(0); lso->mss = htobe16(m0->m_pkthdr.tso_segsz); lso->seqno_offset = htobe32(0); lso->len = htobe32(pktlen); if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) csum_type = TX_CSUM_TCPIP6; else csum_type = TX_CSUM_TCPIP; cpl = (void *)(lso + 1); txq->tso_wrs++; } else { if (m0->m_pkthdr.csum_flags & CSUM_IP_TCP) csum_type = TX_CSUM_TCPIP; else if (m0->m_pkthdr.csum_flags & CSUM_IP_UDP) csum_type = TX_CSUM_UDPIP; else if (m0->m_pkthdr.csum_flags & CSUM_IP6_TCP) csum_type = TX_CSUM_TCPIP6; else if (m0->m_pkthdr.csum_flags & CSUM_IP6_UDP) csum_type = TX_CSUM_UDPIP6; #if defined(INET) else if (m0->m_pkthdr.csum_flags & CSUM_IP) { /* * XXX: The firmware appears to stomp on the * fragment/flags field of the IP header when * using TX_CSUM_IP. Fall back to doing * software checksums. */ u_short *sump; struct mbuf *m; int offset; m = m0; offset = 0; sump = m_advance(&m, &offset, m0->m_pkthdr.l2hlen + offsetof(struct ip, ip_sum)); *sump = in_cksum_skip(m0, m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen, m0->m_pkthdr.l2hlen); m0->m_pkthdr.csum_flags &= ~CSUM_IP; } #endif cpl = (void *)(wr + 1); } /* Checksum offload */ ctrl1 = 0; if (needs_l3_csum(m0) == 0) ctrl1 |= F_TXPKT_IPCSUM_DIS; if (csum_type >= 0) { KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0, ("%s: mbuf %p needs checksum offload but missing header lengths", __func__, m0)); if (chip_id(sc) <= CHELSIO_T5) { ctrl1 |= V_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen - ETHER_HDR_LEN); } else { ctrl1 |= V_T6_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen - ETHER_HDR_LEN); } ctrl1 |= V_TXPKT_IPHDR_LEN(m0->m_pkthdr.l3hlen); ctrl1 |= V_TXPKT_CSUM_TYPE(csum_type); } else ctrl1 |= F_TXPKT_L4CSUM_DIS; if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) txq->txcsum++; /* some hardware assistance provided */ /* VLAN tag insertion */ if (needs_vlan_insertion(m0)) { ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); txq->vlan_insertion++; } /* CPL header */ cpl->ctrl0 = txq->cpl_ctrl0; cpl->pack = 0; cpl->len = htobe16(pktlen); cpl->ctrl1 = htobe64(ctrl1); /* SGL */ dst = (void *)(cpl + 1); /* * A packet using TSO will use up an entire descriptor for the * firmware work request header, LSO CPL, and TX_PKT_XT CPL. * If this descriptor is the last descriptor in the ring, wrap * around to the front of the ring explicitly for the start of * the sgl. */ if (dst == (void *)&eq->desc[eq->sidx]) { dst = (void *)&eq->desc[0]; write_gl_to_txd(txq, m0, &dst, 0); } else write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); txq->sgl_wrs++; txq->txpkt_wrs++; txsd = &txq->sdesc[eq->pidx]; txsd->m = m0; txsd->desc_used = ndesc; return (ndesc); } /* * Write a txpkt WR for this packet to the hardware descriptors, update the * software descriptor, and advance the pidx. It is guaranteed that enough * descriptors are available. 
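
/*
 * Illustrative sketch (not driver code): the CSUM_IP fallback above computes
 * the IPv4 header checksum in software (via in_cksum_skip) because the
 * firmware stomps on the fragment/flags field when asked to offload it.  The
 * underlying ones-complement sum, over a contiguous header whose checksum
 * field has already been zeroed:
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t
ip_hdr_cksum(const void *hdr, size_t len)       /* len is even for IPv4 */
{
        const uint16_t *p = hdr;
        uint32_t sum = 0;

        for (; len > 1; len -= 2)
                sum += *p++;
        while (sum > 0xffff)    /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return ((uint16_t)~sum);
}
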
* * The return value is the # of hardware descriptors used. */ static u_int write_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr, struct mbuf *m0, u_int available) { struct sge_eq *eq = &txq->eq; struct tx_sdesc *txsd; struct cpl_tx_pkt_core *cpl; uint32_t ctrl; /* used in many unrelated places */ uint64_t ctrl1; int len16, ndesc, pktlen, nsegs; caddr_t dst; TXQ_LOCK_ASSERT_OWNED(txq); M_ASSERTPKTHDR(m0); MPASS(available > 0 && available < eq->sidx); len16 = mbuf_len16(m0); nsegs = mbuf_nsegs(m0); pktlen = m0->m_pkthdr.len; ctrl = sizeof(struct cpl_tx_pkt_core); if (needs_tso(m0)) ctrl += sizeof(struct cpl_tx_pkt_lso_core); else if (pktlen <= imm_payload(2) && available >= 2) { /* Immediate data. Recalculate len16 and set nsegs to 0. */ ctrl += pktlen; len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + pktlen, 16); nsegs = 0; } ndesc = howmany(len16, EQ_ESIZE / 16); MPASS(ndesc <= available); /* Firmware work request header */ MPASS(wr == (void *)&eq->desc[eq->pidx]); wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); ctrl = V_FW_WR_LEN16(len16); wr->equiq_to_len16 = htobe32(ctrl); wr->r3 = 0; if (needs_tso(m0)) { struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && m0->m_pkthdr.l4hlen > 0, ("%s: mbuf %p needs TSO but missing header lengths", __func__, m0)); ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) ctrl |= V_LSO_ETHHDR_LEN(1); if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) ctrl |= F_LSO_IPV6; lso->lso_ctrl = htobe32(ctrl); lso->ipid_ofst = htobe16(0); lso->mss = htobe16(m0->m_pkthdr.tso_segsz); lso->seqno_offset = htobe32(0); lso->len = htobe32(pktlen); cpl = (void *)(lso + 1); txq->tso_wrs++; } else cpl = (void *)(wr + 1); /* Checksum offload */ ctrl1 = 0; if (needs_l3_csum(m0) == 0) ctrl1 |= F_TXPKT_IPCSUM_DIS; if (needs_l4_csum(m0) == 0) ctrl1 |= F_TXPKT_L4CSUM_DIS; if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) txq->txcsum++; /* some hardware assistance provided */ /* VLAN tag insertion */ if (needs_vlan_insertion(m0)) { ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); txq->vlan_insertion++; } /* CPL header */ cpl->ctrl0 = txq->cpl_ctrl0; cpl->pack = 0; cpl->len = htobe16(pktlen); cpl->ctrl1 = htobe64(ctrl1); /* SGL */ dst = (void *)(cpl + 1); if (nsegs > 0) { write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); txq->sgl_wrs++; } else { struct mbuf *m; for (m = m0; m != NULL; m = m->m_next) { copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); #ifdef INVARIANTS pktlen -= m->m_len; #endif } #ifdef INVARIANTS KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); #endif txq->imm_wrs++; } txq->txpkt_wrs++; txsd = &txq->sdesc[eq->pidx]; txsd->m = m0; txsd->desc_used = ndesc; return (ndesc); } static int try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available) { u_int needed, nsegs1, nsegs2, l1, l2; if (cannot_use_txpkts(m) || cannot_use_txpkts(n)) return (1); nsegs1 = mbuf_nsegs(m); nsegs2 = mbuf_nsegs(n); if (nsegs1 + nsegs2 == 2) { txp->wr_type = 1; l1 = l2 = txpkts1_len16(); } else { txp->wr_type = 0; l1 = txpkts0_len16(nsegs1); l2 = txpkts0_len16(nsegs2); } txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2; needed = 
howmany(txp->len16, EQ_ESIZE / 16); if (needed > SGE_MAX_WR_NDESC || needed > available) return (1); txp->plen = m->m_pkthdr.len + n->m_pkthdr.len; if (txp->plen > 65535) return (1); txp->npkt = 2; set_mbuf_len16(m, l1); set_mbuf_len16(n, l2); return (0); } static int add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available) { u_int plen, len16, needed, nsegs; MPASS(txp->wr_type == 0 || txp->wr_type == 1); nsegs = mbuf_nsegs(m); if (needs_tso(m) || (txp->wr_type == 1 && nsegs != 1)) return (1); plen = txp->plen + m->m_pkthdr.len; if (plen > 65535) return (1); if (txp->wr_type == 0) len16 = txpkts0_len16(nsegs); else len16 = txpkts1_len16(); needed = howmany(txp->len16 + len16, EQ_ESIZE / 16); if (needed > SGE_MAX_WR_NDESC || needed > available) return (1); txp->npkt++; txp->plen = plen; txp->len16 += len16; set_mbuf_len16(m, len16); return (0); } /* * Write a txpkts WR for the packets in txp to the hardware descriptors, update * the software descriptor, and advance the pidx. It is guaranteed that enough * descriptors are available. * * The return value is the # of hardware descriptors used. */ static u_int write_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr, struct mbuf *m0, const struct txpkts *txp, u_int available) { struct sge_eq *eq = &txq->eq; struct tx_sdesc *txsd; struct cpl_tx_pkt_core *cpl; uint32_t ctrl; uint64_t ctrl1; int ndesc, checkwrap; struct mbuf *m; void *flitp; TXQ_LOCK_ASSERT_OWNED(txq); MPASS(txp->npkt > 0); MPASS(txp->plen < 65536); MPASS(m0 != NULL); MPASS(m0->m_nextpkt != NULL); MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); MPASS(available > 0 && available < eq->sidx); ndesc = howmany(txp->len16, EQ_ESIZE / 16); MPASS(ndesc <= available); MPASS(wr == (void *)&eq->desc[eq->pidx]); wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); ctrl = V_FW_WR_LEN16(txp->len16); wr->equiq_to_len16 = htobe32(ctrl); wr->plen = htobe16(txp->plen); wr->npkt = txp->npkt; wr->r3 = 0; wr->type = txp->wr_type; flitp = wr + 1; /* * At this point we are 16B into a hardware descriptor. If checkwrap is * set then we know the WR is going to wrap around somewhere. We'll * check for that at appropriate points. 
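 *
 * An assumed example of the test below: with sidx == 1024, pidx == 1020,
 * and ndesc == 8 the WR occupies descriptors 1020-1023 and then 0-3, so it
 * wraps; eq->sidx - ndesc == 1016 < eq->pidx catches exactly this case.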
*/ checkwrap = eq->sidx - ndesc < eq->pidx; for (m = m0; m != NULL; m = m->m_nextpkt) { if (txp->wr_type == 0) { struct ulp_txpkt *ulpmc; struct ulptx_idata *ulpsc; /* ULP master command */ ulpmc = flitp; ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); ulpmc->len = htobe32(mbuf_len16(m)); /* ULP subcommand */ ulpsc = (void *)(ulpmc + 1); ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | F_ULP_TX_SC_MORE); ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); cpl = (void *)(ulpsc + 1); if (checkwrap && (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) cpl = (void *)&eq->desc[0]; } else { cpl = flitp; } /* Checksum offload */ ctrl1 = 0; if (needs_l3_csum(m) == 0) ctrl1 |= F_TXPKT_IPCSUM_DIS; if (needs_l4_csum(m) == 0) ctrl1 |= F_TXPKT_L4CSUM_DIS; if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) txq->txcsum++; /* some hardware assistance provided */ /* VLAN tag insertion */ if (needs_vlan_insertion(m)) { ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); txq->vlan_insertion++; } /* CPL header */ cpl->ctrl0 = txq->cpl_ctrl0; cpl->pack = 0; cpl->len = htobe16(m->m_pkthdr.len); cpl->ctrl1 = htobe64(ctrl1); flitp = cpl + 1; if (checkwrap && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) flitp = (void *)&eq->desc[0]; write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); } if (txp->wr_type == 0) { txq->txpkts0_pkts += txp->npkt; txq->txpkts0_wrs++; } else { txq->txpkts1_pkts += txp->npkt; txq->txpkts1_wrs++; } txsd = &txq->sdesc[eq->pidx]; txsd->m = m0; txsd->desc_used = ndesc; return (ndesc); } /* * If the SGL ends on an address that is not 16 byte aligned, this function will * add a 0 filled flit at the end. */ static void write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) { struct sge_eq *eq = &txq->eq; struct sglist *gl = txq->gl; struct sglist_seg *seg; __be64 *flitp, *wrap; struct ulptx_sgl *usgl; int i, nflits, nsegs; KASSERT(((uintptr_t)(*to) & 0xf) == 0, ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); get_pkt_gl(m, gl); nsegs = gl->sg_nseg; MPASS(nsegs > 0); nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; flitp = (__be64 *)(*to); wrap = (__be64 *)(&eq->desc[eq->sidx]); seg = &gl->sg_segs[0]; usgl = (void *)flitp; /* * We start at a 16 byte boundary somewhere inside the tx descriptor * ring, so we're at least 16 bytes away from the status page. There is * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
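 *
 * The nflits computed above follows from the DSGL layout: two flits for
 * cmd_nsge/len0 and addr0, then 3 flits for every remaining pair of
 * segments (one flit of lengths, two of addresses). E.g. (assumed)
 * nsegs == 5 gives nflits = (3 * 4) / 2 + 0 + 2 == 8.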
*/ usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(nsegs)); usgl->len0 = htobe32(seg->ss_len); usgl->addr0 = htobe64(seg->ss_paddr); seg++; if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { /* Won't wrap around at all */ for (i = 0; i < nsegs - 1; i++, seg++) { usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); } if (i & 1) usgl->sge[i / 2].len[1] = htobe32(0); flitp += nflits; } else { /* Will wrap somewhere in the rest of the SGL */ /* 2 flits already written, write the rest flit by flit */ flitp = (void *)(usgl + 1); for (i = 0; i < nflits - 2; i++) { if (flitp == wrap) flitp = (void *)eq->desc; *flitp++ = get_flit(seg, nsegs - 1, i); } } if (nflits & 1) { MPASS(((uintptr_t)flitp) & 0xf); *flitp++ = 0; } MPASS((((uintptr_t)flitp) & 0xf) == 0); if (__predict_false(flitp == wrap)) *to = (void *)eq->desc; else *to = (void *)flitp; } static inline void copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) { MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); if (__predict_true((uintptr_t)(*to) + len <= (uintptr_t)&eq->desc[eq->sidx])) { bcopy(from, *to, len); (*to) += len; } else { int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); bcopy(from, *to, portion); from += portion; portion = len - portion; /* remaining */ bcopy(from, (void *)eq->desc, portion); (*to) = (caddr_t)eq->desc + portion; } } static inline void ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) { u_int db; MPASS(n > 0); db = eq->doorbells; if (n > 1) clrbit(&db, DOORBELL_WCWR); wmb(); switch (ffs(db) - 1) { case DOORBELL_UDB: *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); break; case DOORBELL_WCWR: { volatile uint64_t *dst, *src; int i; /* * Queues whose 128B doorbell segment fits in the page do not * use relative qid (udb_qid is always 0). Only queues with * doorbell segments can do WCWR. */ KASSERT(eq->udb_qid == 0 && n == 1, ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", __func__, eq->doorbells, n, eq->dbidx, eq)); dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - UDBS_DB_OFFSET); i = eq->dbidx; src = (void *)&eq->desc[i]; while (src != (void *)&eq->desc[i + 1]) *dst++ = *src++; wmb(); break; } case DOORBELL_UDBWC: *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); wmb(); break; case DOORBELL_KDB: t4_write_reg(sc, sc->sge_kdoorbell_reg, V_QID(eq->cntxt_id) | V_PIDX(n)); break; } IDXINCR(eq->dbidx, n, eq->sidx); } static inline u_int reclaimable_tx_desc(struct sge_eq *eq) { uint16_t hw_cidx; hw_cidx = read_hw_cidx(eq); return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); } static inline u_int total_available_tx_desc(struct sge_eq *eq) { uint16_t hw_cidx, pidx; hw_cidx = read_hw_cidx(eq); pidx = eq->pidx; if (pidx == hw_cidx) return (eq->sidx - 1); else return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); } static inline uint16_t read_hw_cidx(struct sge_eq *eq) { struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; uint16_t cidx = spg->cidx; /* stable snapshot */ return (be16toh(cidx)); } /* * Reclaim 'n' descriptors approximately. 
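 * "Approximately" because descriptors come back a whole WR at a time: the
 * loop below stops at the first WR boundary at or past 'n', so the return
 * value may exceed 'n' (or fall short when fewer credits are available).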
*/ static u_int reclaim_tx_descs(struct sge_txq *txq, u_int n) { struct tx_sdesc *txsd; struct sge_eq *eq = &txq->eq; u_int can_reclaim, reclaimed; TXQ_LOCK_ASSERT_OWNED(txq); MPASS(n > 0); reclaimed = 0; can_reclaim = reclaimable_tx_desc(eq); while (can_reclaim && reclaimed < n) { int ndesc; struct mbuf *m, *nextpkt; txsd = &txq->sdesc[eq->cidx]; ndesc = txsd->desc_used; /* Firmware doesn't return "partial" credits. */ KASSERT(can_reclaim >= ndesc, ("%s: unexpected number of credits: %d, %d", __func__, can_reclaim, ndesc)); KASSERT(ndesc != 0, ("%s: descriptor with no credits: cidx %d", __func__, eq->cidx)); for (m = txsd->m; m != NULL; m = nextpkt) { nextpkt = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } reclaimed += ndesc; can_reclaim -= ndesc; IDXINCR(eq->cidx, ndesc, eq->sidx); } return (reclaimed); } static void tx_reclaim(void *arg, int n) { struct sge_txq *txq = arg; struct sge_eq *eq = &txq->eq; do { if (TXQ_TRYLOCK(txq) == 0) break; n = reclaim_tx_descs(txq, 32); if (eq->cidx == eq->pidx) eq->equeqidx = eq->pidx; TXQ_UNLOCK(txq); } while (n > 0); } static __be64 get_flit(struct sglist_seg *segs, int nsegs, int idx) { int i = (idx / 3) * 2; switch (idx % 3) { case 0: { uint64_t rc; rc = (uint64_t)segs[i].ss_len << 32; if (i + 1 < nsegs) rc |= (uint64_t)(segs[i + 1].ss_len); return (htobe64(rc)); } case 1: return (htobe64(segs[i].ss_paddr)); case 2: return (htobe64(segs[i + 1].ss_paddr)); } return (0); } static void find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp) { int8_t zidx, hwidx, idx; uint16_t region1, region3; int spare, spare_needed, n; struct sw_zone_info *swz; struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0]; /* * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize * large enough for the max payload and cluster metadata. Otherwise * settle for the largest bufsize that leaves enough room in the cluster * for metadata. * * Without buffer packing: Look for the smallest zone which has a * bufsize large enough for the max payload. Settle for the largest * bufsize available if there's nothing big enough for max payload. */ spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0; swz = &sc->sge.sw_zone_info[0]; hwidx = -1; for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) { if (swz->size > largest_rx_cluster) { if (__predict_true(hwidx != -1)) break; /* * This is a misconfiguration. largest_rx_cluster is * preventing us from finding a refill source. See * dev.t5nex..buffer_sizes to figure out why. */ device_printf(sc->dev, "largest_rx_cluster=%u leaves no" " refill source for fl %p (dma %u). Ignored.\n", largest_rx_cluster, fl, maxp); } for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) { hwb = &hwb_list[idx]; spare = swz->size - hwb->size; if (spare < spare_needed) continue; hwidx = idx; /* best option so far */ if (hwb->size >= maxp) { if ((fl->flags & FL_BUF_PACKING) == 0) goto done; /* stop looking (not packing) */ if (swz->size >= safest_rx_cluster) goto done; /* stop looking (packing) */ } break; /* keep looking, next zone */ } } done: /* A usable hwidx has been located. */ MPASS(hwidx != -1); hwb = &hwb_list[hwidx]; zidx = hwb->zidx; swz = &sc->sge.sw_zone_info[zidx]; region1 = 0; region3 = swz->size - hwb->size; /* * Stay within this zone and see if there is a better match when mbuf * inlining is allowed. Remember that the hwidx's are sorted in * decreasing order of size (so in increasing order of spare area). 
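 *
 * The resulting cluster layout (an assumed example with mbufs inlined) is
 *   | region1 (inline mbufs, multiple of MSIZE) | payload | region3 |
 * where region3 is the leftover spare that also holds the cl_metadata;
 * the KASSERTs below check region1 + payload + region3 == cluster size.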
*/ for (idx = hwidx; idx != -1; idx = hwb->next) { hwb = &hwb_list[idx]; spare = swz->size - hwb->size; if (allow_mbufs_in_cluster == 0 || hwb->size < maxp) break; /* * Do not inline mbufs if doing so would violate the pad/pack * boundary alignment requirement. */ if (fl_pad && (MSIZE % sc->params.sge.pad_boundary) != 0) continue; if (fl->flags & FL_BUF_PACKING && (MSIZE % sc->params.sge.pack_boundary) != 0) continue; if (spare < CL_METADATA_SIZE + MSIZE) continue; n = (spare - CL_METADATA_SIZE) / MSIZE; if (n > howmany(hwb->size, maxp)) break; hwidx = idx; if (fl->flags & FL_BUF_PACKING) { region1 = n * MSIZE; region3 = spare - region1; } else { region1 = MSIZE; region3 = spare - region1; break; } } KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES, ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp)); KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES, ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp)); KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 == sc->sge.sw_zone_info[zidx].size, ("%s: bad buffer layout for fl %p, maxp %d. " "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, sc->sge.sw_zone_info[zidx].size, region1, sc->sge.hw_buf_info[hwidx].size, region3)); if (fl->flags & FL_BUF_PACKING || region1 > 0) { KASSERT(region3 >= CL_METADATA_SIZE, ("%s: no room for metadata. fl %p, maxp %d; " "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, sc->sge.sw_zone_info[zidx].size, region1, sc->sge.hw_buf_info[hwidx].size, region3)); KASSERT(region1 % MSIZE == 0, ("%s: bad mbuf region for fl %p, maxp %d. " "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, sc->sge.sw_zone_info[zidx].size, region1, sc->sge.hw_buf_info[hwidx].size, region3)); } fl->cll_def.zidx = zidx; fl->cll_def.hwidx = hwidx; fl->cll_def.region1 = region1; fl->cll_def.region3 = region3; } static void find_safe_refill_source(struct adapter *sc, struct sge_fl *fl) { struct sge *s = &sc->sge; struct hw_buf_info *hwb; struct sw_zone_info *swz; int spare; int8_t hwidx; if (fl->flags & FL_BUF_PACKING) hwidx = s->safe_hwidx2; /* with room for metadata */ else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) { hwidx = s->safe_hwidx2; hwb = &s->hw_buf_info[hwidx]; swz = &s->sw_zone_info[hwb->zidx]; spare = swz->size - hwb->size; /* no good if there isn't room for an mbuf as well */ if (spare < CL_METADATA_SIZE + MSIZE) hwidx = s->safe_hwidx1; } else hwidx = s->safe_hwidx1; if (hwidx == -1) { /* No fallback source */ fl->cll_alt.hwidx = -1; fl->cll_alt.zidx = -1; return; } hwb = &s->hw_buf_info[hwidx]; swz = &s->sw_zone_info[hwb->zidx]; spare = swz->size - hwb->size; fl->cll_alt.hwidx = hwidx; fl->cll_alt.zidx = hwb->zidx; if (allow_mbufs_in_cluster && (fl_pad == 0 || (MSIZE % sc->params.sge.pad_boundary) == 0)) fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE; else fl->cll_alt.region1 = 0; fl->cll_alt.region3 = spare - fl->cll_alt.region1; } static void add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) { mtx_lock(&sc->sfl_lock); FL_LOCK(fl); if ((fl->flags & FL_DOOMED) == 0) { fl->flags |= FL_STARVING; TAILQ_INSERT_TAIL(&sc->sfl, fl, link); callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); } FL_UNLOCK(fl); mtx_unlock(&sc->sfl_lock); } static void handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) { struct sge_wrq *wrq = (void *)eq; atomic_readandclear_int(&eq->equiq); taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task); } static void handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) { struct sge_txq *txq = (void *)eq; MPASS((eq->flags 
& EQ_TYPEMASK) == EQ_ETH); atomic_readandclear_int(&eq->equiq); mp_ring_check_drainage(txq->r, 0); taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task); } static int handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); struct adapter *sc = iq->adapter; struct sge *s = &sc->sge; struct sge_eq *eq; static void (*h[])(struct adapter *, struct sge_eq *) = {NULL, &handle_wrq_egr_update, &handle_eth_egr_update, &handle_wrq_egr_update}; KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, rss->opcode)); eq = s->eqmap[qid - s->eq_start - s->eq_base]; (*h[eq->flags & EQ_TYPEMASK])(sc, eq); return (0); } /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ offsetof(struct cpl_fw6_msg, data)); static int handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, rss->opcode)); if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { const struct rss_header *rss2; rss2 = (const struct rss_header *)&cpl->data[0]; return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); } return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); } /** * t4_handle_wrerr_rpl - process a FW work request error message * @adap: the adapter * @rpl: start of the FW message */ static int t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl) { u8 opcode = *(const u8 *)rpl; const struct fw_error_cmd *e = (const void *)rpl; unsigned int i; if (opcode != FW_ERROR_CMD) { log(LOG_ERR, "%s: Received WRERR_RPL message with opcode %#x\n", device_get_nameunit(adap->dev), opcode); return (EINVAL); } log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : "non-fatal"); switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { case FW_ERROR_TYPE_EXCEPTION: log(LOG_ERR, "exception info:\n"); for (i = 0; i < nitems(e->u.exception.info); i++) log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ", be32toh(e->u.exception.info[i])); log(LOG_ERR, "\n"); break; case FW_ERROR_TYPE_HWMODULE: log(LOG_ERR, "HW module regaddr %08x regval %08x\n", be32toh(e->u.hwmodule.regaddr), be32toh(e->u.hwmodule.regval)); break; case FW_ERROR_TYPE_WR: log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n", be16toh(e->u.wr.cidx), G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), be32toh(e->u.wr.eqid)); for (i = 0; i < nitems(e->u.wr.wrhdr); i++) log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ", e->u.wr.wrhdr[i]); log(LOG_ERR, "\n"); break; case FW_ERROR_TYPE_ACL: log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s", be16toh(e->u.acl.cidx), G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), be32toh(e->u.acl.eqid), G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? 
"vlanid" : "MAC"); for (i = 0; i < nitems(e->u.acl.val); i++) log(LOG_ERR, " %02x", e->u.acl.val[i]); log(LOG_ERR, "\n"); break; default: log(LOG_ERR, "type %#x\n", G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); return (EINVAL); } return (0); } static int sysctl_uint16(SYSCTL_HANDLER_ARGS) { uint16_t *id = arg1; int i = *id; return sysctl_handle_int(oidp, &i, 0, req); } static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS) { struct sge *s = arg1; struct hw_buf_info *hwb = &s->hw_buf_info[0]; struct sw_zone_info *swz = &s->sw_zone_info[0]; int i, rc; struct sbuf sb; char c; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster) c = '*'; else c = '\0'; sbuf_printf(&sb, "%u%c ", hwb->size, c); } sbuf_trim(&sb); sbuf_finish(&sb); rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (rc); } Index: stable/11/sys/dev/cxgbe/tom/t4_cpl_io.c =================================================================== --- stable/11/sys/dev/cxgbe/tom/t4_cpl_io.c (revision 346966) +++ stable/11/sys/dev/cxgbe/tom/t4_cpl_io.c (revision 346967) @@ -1,2355 +1,2355 @@ /*- * Copyright (c) 2012, 2015 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #ifdef TCP_OFFLOAD #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TCPSTATES #include #include #include #include #include #include #include #include #include #include #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_tcb.h" #include "tom/t4_tom_l2t.h" #include "tom/t4_tom.h" VNET_DECLARE(int, tcp_do_autosndbuf); #define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf) VNET_DECLARE(int, tcp_autosndbuf_inc); #define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc) VNET_DECLARE(int, tcp_autosndbuf_max); #define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max) VNET_DECLARE(int, tcp_do_autorcvbuf); #define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) VNET_DECLARE(int, tcp_autorcvbuf_inc); #define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) VNET_DECLARE(int, tcp_autorcvbuf_max); #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) static void t4_aiotx_cancel(struct kaiocb *job); static void t4_aiotx_queue_toep(struct toepcb *toep); static size_t aiotx_mbuf_pgoff(struct mbuf *m) { struct aiotx_buffer *ab; MPASS(IS_AIOTX_MBUF(m)); ab = m->m_ext.ext_arg1; return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE); } static vm_page_t * aiotx_mbuf_pages(struct mbuf *m) { struct aiotx_buffer *ab; int npages; MPASS(IS_AIOTX_MBUF(m)); ab = m->m_ext.ext_arg1; npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE; return (ab->ps.pages + npages); } void send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp) { struct wrqe *wr; struct fw_flowc_wr *flowc; unsigned int nparams, flowclen, paramidx; struct vi_info *vi = toep->vi; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; - unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN; + unsigned int pfvf = sc->pf << S_FW_VIID_PFN; struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), ("%s: flowc for tid %u sent already", __func__, toep->tid)); if (ftxp != NULL) nparams = 8; else nparams = 6; if (toep->ulp_mode == ULP_MODE_TLS) nparams++; if (toep->tls.fcplenmax != 0) nparams++; if (toep->tc_idx != -1) { MPASS(toep->tc_idx >= 0 && toep->tc_idx < sc->chip_params->nsched_cls); nparams++; } flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); if (wr == NULL) { /* XXX */ panic("%s: allocation failure.", __func__); } flowc = wrtod(wr); memset(flowc, 0, wr->wr_len); flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | V_FW_FLOWC_WR_NPARAMS(nparams)); flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | V_FW_WR_FLOWID(toep->tid)); #define FLOWC_PARAM(__m, __v) \ do { \ flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \ flowc->mnemval[paramidx].val = htobe32(__v); \ paramidx++; \ } while (0) paramidx = 0; FLOWC_PARAM(PFNVFN, pfvf); FLOWC_PARAM(CH, pi->tx_chan); FLOWC_PARAM(PORT, pi->tx_chan); FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id); if (ftxp) { uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); FLOWC_PARAM(SNDNXT, ftxp->snd_nxt); FLOWC_PARAM(RCVNXT, ftxp->rcv_nxt); FLOWC_PARAM(SNDBUF, sndbuf); FLOWC_PARAM(MSS, ftxp->mss); CTR6(KTR_CXGBE, "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt, ftxp->rcv_nxt); } else { FLOWC_PARAM(SNDBUF, 512); FLOWC_PARAM(MSS, 512); 
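	/*
	 * An assumption, not stated in the code: with no flowc_tx_params
	 * available, the 512s above are just conservative placeholder
	 * values for the firmware.
	 */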
CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); } if (toep->ulp_mode == ULP_MODE_TLS) FLOWC_PARAM(ULP_MODE, toep->ulp_mode); if (toep->tls.fcplenmax != 0) FLOWC_PARAM(TXDATAPLEN_MAX, toep->tls.fcplenmax); if (toep->tc_idx != -1) FLOWC_PARAM(SCHEDCLASS, toep->tc_idx); #undef FLOWC_PARAM KASSERT(paramidx == nparams, ("nparams mismatch")); txsd->tx_credits = howmany(flowclen, 16); txsd->plen = 0; KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, ("%s: not enough credits (%d)", __func__, toep->tx_credits)); toep->tx_credits -= txsd->tx_credits; if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) toep->txsd_pidx = 0; toep->txsd_avail--; toep->flags |= TPF_FLOWC_WR_SENT; t4_wrq_tx(sc, wr); } #ifdef RATELIMIT /* * Input is Bytes/second (so_max_pacing-rate), chip counts in Kilobits/second. */ static int update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps) { int tc_idx, rc; const u_int kbps = (u_int) (uint64_t)Bps * 8ULL / 1000; const int port_id = toep->vi->pi->port_id; CTR3(KTR_CXGBE, "%s: tid %u, rate %uKbps", __func__, toep->tid, kbps); if (kbps == 0) { /* unbind */ tc_idx = -1; } else { rc = t4_reserve_cl_rl_kbps(sc, port_id, kbps, &tc_idx); if (rc != 0) return (rc); MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls); } if (toep->tc_idx != tc_idx) { struct wrqe *wr; struct fw_flowc_wr *flowc; int nparams = 1, flowclen, flowclen16; struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); flowclen16 = howmany(flowclen, 16); if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 || (wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq)) == NULL) { if (tc_idx >= 0) t4_release_cl_rl(sc, port_id, tc_idx); return (ENOMEM); } flowc = wrtod(wr); memset(flowc, 0, wr->wr_len); flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | V_FW_FLOWC_WR_NPARAMS(nparams)); flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) | V_FW_WR_FLOWID(toep->tid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; if (tc_idx == -1) flowc->mnemval[0].val = htobe32(0xff); else flowc->mnemval[0].val = htobe32(tc_idx); txsd->tx_credits = flowclen16; txsd->plen = 0; toep->tx_credits -= txsd->tx_credits; if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) toep->txsd_pidx = 0; toep->txsd_avail--; t4_wrq_tx(sc, wr); } if (toep->tc_idx >= 0) t4_release_cl_rl(sc, port_id, toep->tc_idx); toep->tc_idx = tc_idx; return (0); } #endif void send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) { struct wrqe *wr; struct cpl_abort_req *req; int tid = toep->tid; struct inpcb *inp = toep->inp; struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ INP_WLOCK_ASSERT(inp); CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", __func__, toep->tid, inp->inp_flags & INP_DROPPED ? "inp dropped" : tcpstates[tp->t_state], toep->flags, inp->inp_flags, toep->flags & TPF_ABORT_SHUTDOWN ? 
" (abort already in progress)" : ""); if (toep->flags & TPF_ABORT_SHUTDOWN) return; /* abort already in progress */ toep->flags |= TPF_ABORT_SHUTDOWN; KASSERT(toep->flags & TPF_FLOWC_WR_SENT, ("%s: flowc_wr not sent for tid %d.", __func__, tid)); wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); if (wr == NULL) { /* XXX */ panic("%s: allocation failure.", __func__); } req = wrtod(wr); INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); if (inp->inp_flags & INP_DROPPED) req->rsvd0 = htobe32(snd_nxt); else req->rsvd0 = htobe32(tp->snd_nxt); req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); req->cmd = CPL_ABORT_SEND_RST; /* * XXX: What's the correct way to tell that the inp hasn't been detached * from its socket? Should I even be flushing the snd buffer here? */ if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { struct socket *so = inp->inp_socket; if (so != NULL) /* because I'm not sure. See comment above */ sbflush(&so->so_snd); } t4_l2t_send(sc, wr, toep->l2te); } /* * Called when a connection is established to translate the TCP options * reported by HW to FreeBSD's native format. */ static void assign_rxopt(struct tcpcb *tp, unsigned int opt) { struct toepcb *toep = tp->t_toe; struct inpcb *inp = tp->t_inpcb; struct adapter *sc = td_adapter(toep->td); int n; INP_LOCK_ASSERT(inp); if (inp->inp_inc.inc_flags & INC_ISIPV6) n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); else n = sizeof(struct ip) + sizeof(struct tcphdr); tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n; if (G_TCPOPT_TSTAMP(opt)) { tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ tp->ts_recent = 0; /* hmmm */ tp->ts_recent_age = tcp_ts_getticks(); tp->t_maxseg -= TCPOLEN_TSTAMP_APPA; } CTR5(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), mss %u", __func__, toep->tid, G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)], tp->t_maxseg); if (G_TCPOPT_SACK(opt)) tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ else tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ if (G_TCPOPT_WSCALE_OK(opt)) tp->t_flags |= TF_RCVD_SCALE; /* Doing window scaling? */ if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == (TF_RCVD_SCALE | TF_REQ_SCALE)) { tp->rcv_scale = tp->request_r_scale; tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); } } /* * Completes some final bits of initialization for just established connections * and changes their state to TCPS_ESTABLISHED. * * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. */ void make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, uint16_t opt) { struct inpcb *inp = toep->inp; struct socket *so = inp->inp_socket; struct tcpcb *tp = intotcpcb(inp); long bufsize; uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */ uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */ uint16_t tcpopt = be16toh(opt); struct flowc_tx_params ftxp; INP_WLOCK_ASSERT(inp); KASSERT(tp->t_state == TCPS_SYN_SENT || tp->t_state == TCPS_SYN_RECEIVED, ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); CTR6(KTR_CXGBE, "%s: tid %d, so %p, inp %p, tp %p, toep %p", __func__, toep->tid, so, inp, tp, toep); tcp_state_change(tp, TCPS_ESTABLISHED); tp->t_starttime = ticks; TCPSTAT_INC(tcps_connects); tp->irs = irs; tcp_rcvseqinit(tp); tp->rcv_wnd = toep->rx_credits << 10; tp->rcv_adv += tp->rcv_wnd; tp->last_ack_sent = tp->rcv_nxt; /* * If we were unable to send all rx credits via opt0, save the remainder * in rx_credits so that they can be handed over with the next credit * update. 
*/ SOCKBUF_LOCK(&so->so_rcv); bufsize = select_rcv_wnd(so); SOCKBUF_UNLOCK(&so->so_rcv); toep->rx_credits = bufsize - tp->rcv_wnd; tp->iss = iss; tcp_sendseqinit(tp); tp->snd_una = iss + 1; tp->snd_nxt = iss + 1; tp->snd_max = iss + 1; assign_rxopt(tp, tcpopt); SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) bufsize = V_tcp_autosndbuf_max; else bufsize = sbspace(&so->so_snd); SOCKBUF_UNLOCK(&so->so_snd); ftxp.snd_nxt = tp->snd_nxt; ftxp.rcv_nxt = tp->rcv_nxt; ftxp.snd_space = bufsize; ftxp.mss = tp->t_maxseg; send_flowc_wr(toep, &ftxp); soisconnected(so); } int send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) { struct wrqe *wr; struct cpl_rx_data_ack *req; uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); wr = alloc_wrqe(sizeof(*req), toep->ctrlq); if (wr == NULL) return (0); req = wrtod(wr); INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); t4_wrq_tx(sc, wr); return (credits); } void send_rx_modulate(struct adapter *sc, struct toepcb *toep) { struct wrqe *wr; struct cpl_rx_data_ack *req; wr = alloc_wrqe(sizeof(*req), toep->ctrlq); if (wr == NULL) return; req = wrtod(wr); INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); req->credit_dack = htobe32(F_RX_MODULATE_RX); t4_wrq_tx(sc, wr); } void t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) { struct adapter *sc = tod->tod_softc; struct inpcb *inp = tp->t_inpcb; struct socket *so = inp->inp_socket; struct sockbuf *sb = &so->so_rcv; struct toepcb *toep = tp->t_toe; int credits; INP_WLOCK_ASSERT(inp); SOCKBUF_LOCK_ASSERT(sb); KASSERT(toep->sb_cc >= sbused(sb), ("%s: sb %p has more data (%d) than last time (%d).", __func__, sb, sbused(sb), toep->sb_cc)); credits = toep->sb_cc - sbused(sb); toep->sb_cc = sbused(sb); if (toep->ulp_mode == ULP_MODE_TLS) { if (toep->tls.rcv_over >= credits) { toep->tls.rcv_over -= credits; credits = 0; } else { credits -= toep->tls.rcv_over; toep->tls.rcv_over = 0; } } toep->rx_credits += credits; if (toep->rx_credits > 0 && (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 || (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) { credits = send_rx_credits(sc, toep, toep->rx_credits); toep->rx_credits -= credits; tp->rcv_wnd += credits; tp->rcv_adv += credits; } else if (toep->flags & TPF_FORCE_CREDITS) send_rx_modulate(sc, toep); } void t4_rcvd(struct toedev *tod, struct tcpcb *tp) { struct inpcb *inp = tp->t_inpcb; struct socket *so = inp->inp_socket; struct sockbuf *sb = &so->so_rcv; SOCKBUF_LOCK(sb); t4_rcvd_locked(tod, tp); SOCKBUF_UNLOCK(sb); } /* * Close a connection by sending a CPL_CLOSE_CON_REQ message. */ int t4_close_conn(struct adapter *sc, struct toepcb *toep) { struct wrqe *wr; struct cpl_close_con_req *req; unsigned int tid = toep->tid; CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, toep->flags & TPF_FIN_SENT ? 
", IGNORED" : ""); if (toep->flags & TPF_FIN_SENT) return (0); KASSERT(toep->flags & TPF_FLOWC_WR_SENT, ("%s: flowc_wr not sent for tid %u.", __func__, tid)); wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); if (wr == NULL) { /* XXX */ panic("%s: allocation failure.", __func__); } req = wrtod(wr); req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | V_FW_WR_FLOWID(tid)); req->wr.wr_lo = cpu_to_be64(0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); req->rsvd = 0; toep->flags |= TPF_FIN_SENT; toep->flags &= ~TPF_SEND_FIN; t4_l2t_send(sc, wr, toep->l2te); return (0); } #define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) #define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) /* Maximum amount of immediate data we could stuff in a WR */ static inline int max_imm_payload(int tx_credits) { const int n = 2; /* Use only up to 2 desc for imm. data WR */ KASSERT(tx_credits >= 0 && tx_credits <= MAX_OFLD_TX_CREDITS, ("%s: %d credits", __func__, tx_credits)); if (tx_credits < MIN_OFLD_TX_CREDITS) return (0); if (tx_credits >= (n * EQ_ESIZE) / 16) return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); else return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); } /* Maximum number of SGL entries we could stuff in a WR */ static inline int max_dsgl_nsegs(int tx_credits) { int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; KASSERT(tx_credits >= 0 && tx_credits <= MAX_OFLD_TX_CREDITS, ("%s: %d credits", __func__, tx_credits)); if (tx_credits < MIN_OFLD_TX_CREDITS) return (0); nseg += 2 * (sge_pair_credits * 16 / 24); if ((sge_pair_credits * 16) % 24 == 16) nseg++; return (nseg); } static inline void write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign) { struct fw_ofld_tx_data_wr *txwr = dst; txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | V_FW_WR_IMMDLEN(immdlen)); txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | V_FW_WR_LEN16(credits)); txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) | V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove)); txwr->plen = htobe32(plen); if (txalign > 0) { struct tcpcb *tp = intotcpcb(toep->inp); if (plen < 2 * tp->t_maxseg) txwr->lsodisable_to_flags |= htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); else txwr->lsodisable_to_flags |= htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | (tp->t_flags & TF_NODELAY ? 0 : F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); } } /* * Generate a DSGL from a starting mbuf. The total number of segments and the * maximum segments in any one mbuf are provided. 
*/ static void write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) { struct mbuf *m; struct ulptx_sgl *usgl = dst; int i, j, rc; struct sglist sg; struct sglist_seg segs[n]; KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); sglist_init(&sg, n, segs); usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(nsegs)); i = -1; for (m = start; m != stop; m = m->m_next) { if (IS_AIOTX_MBUF(m)) rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m), aiotx_mbuf_pgoff(m), m->m_len); else rc = sglist_append(&sg, mtod(m, void *), m->m_len); if (__predict_false(rc != 0)) panic("%s: sglist_append %d", __func__, rc); for (j = 0; j < sg.sg_nseg; i++, j++) { if (i < 0) { usgl->len0 = htobe32(segs[j].ss_len); usgl->addr0 = htobe64(segs[j].ss_paddr); } else { usgl->sge[i / 2].len[i & 1] = htobe32(segs[j].ss_len); usgl->sge[i / 2].addr[i & 1] = htobe64(segs[j].ss_paddr); } #ifdef INVARIANTS nsegs--; #endif } sglist_reset(&sg); } if (i & 1) usgl->sge[i / 2].len[1] = htobe32(0); KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", __func__, nsegs, start, stop)); } /* * Max number of SGL entries an offload tx work request can have. This is 41 * (1 + 40) for a full 512B work request. * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40) */ #define OFLD_SGL_LEN (41) /* * Send data and/or a FIN to the peer. * * The socket's so_snd buffer consists of a stream of data starting with sb_mb * and linked together with m_next. sb_sndptr, if set, is the last mbuf that * was transmitted. * * drop indicates the number of bytes that should be dropped from the head of * the send buffer. It is an optimization that lets do_fw4_ack avoid creating * contention on the send buffer lock (before this change it used to do * sowwakeup and then t4_push_frames right after that when recovering from tx * stalls). When drop is set this function MUST drop the bytes and wake up any * writers. */ void t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) { struct mbuf *sndptr, *m, *sb_sndptr; struct fw_ofld_tx_data_wr *txwr; struct wrqe *wr; u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; struct inpcb *inp = toep->inp; struct tcpcb *tp = intotcpcb(inp); struct socket *so = inp->inp_socket; struct sockbuf *sb = &so->so_snd; int tx_credits, shove, compl, sowwakeup; struct ofld_tx_sdesc *txsd; bool aiotx_mbuf_seen; INP_WLOCK_ASSERT(inp); KASSERT(toep->flags & TPF_FLOWC_WR_SENT, ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); KASSERT(toep->ulp_mode == ULP_MODE_NONE || toep->ulp_mode == ULP_MODE_TCPDDP || toep->ulp_mode == ULP_MODE_TLS || toep->ulp_mode == ULP_MODE_RDMA, ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); #ifdef VERBOSE_TRACES CTR4(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d", __func__, toep->tid, toep->flags, tp->t_flags); #endif if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) return; #ifdef RATELIMIT if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) && (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) { inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED; } #endif /* * This function doesn't resume by itself. Someone else must clear the * flag and call this function. 
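 *
 * (That someone is do_fw4_ack: once returning credits bring tx_credits
 * back up to tx_total / 4 it clears TPF_TX_SUSPENDED and calls here
 * again.)
 */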
*/ if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { KASSERT(drop == 0, ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); return; } txsd = &toep->txsd[toep->txsd_pidx]; do { tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); max_imm = max_imm_payload(tx_credits); max_nsegs = max_dsgl_nsegs(tx_credits); SOCKBUF_LOCK(sb); sowwakeup = drop; if (drop) { sbdrop_locked(sb, drop); drop = 0; } sb_sndptr = sb->sb_sndptr; sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb; plen = 0; nsegs = 0; max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ aiotx_mbuf_seen = false; for (m = sndptr; m != NULL; m = m->m_next) { int n; if (IS_AIOTX_MBUF(m)) n = sglist_count_vmpages(aiotx_mbuf_pages(m), aiotx_mbuf_pgoff(m), m->m_len); else n = sglist_count(mtod(m, void *), m->m_len); nsegs += n; plen += m->m_len; /* This mbuf sent us _over_ the nsegs limit, back out */ if (plen > max_imm && nsegs > max_nsegs) { nsegs -= n; plen -= m->m_len; if (plen == 0) { /* Too few credits */ toep->flags |= TPF_TX_SUSPENDED; if (sowwakeup) { if (!TAILQ_EMPTY( &toep->aiotx_jobq)) t4_aiotx_queue_toep( toep); sowwakeup_locked(so); } else SOCKBUF_UNLOCK(sb); SOCKBUF_UNLOCK_ASSERT(sb); return; } break; } if (IS_AIOTX_MBUF(m)) aiotx_mbuf_seen = true; if (max_nsegs_1mbuf < n) max_nsegs_1mbuf = n; sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ /* This mbuf put us right at the max_nsegs limit */ if (plen > max_imm && nsegs == max_nsegs) { m = m->m_next; break; } } if (sbused(sb) > sb->sb_hiwat * 5 / 8 && toep->plen_nocompl + plen >= sb->sb_hiwat / 4) compl = 1; else compl = 0; if (sb->sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf && sb->sb_hiwat < V_tcp_autosndbuf_max && sbused(sb) >= sb->sb_hiwat * 7 / 8) { int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, V_tcp_autosndbuf_max); if (!sbreserve_locked(sb, newsize, so, NULL)) sb->sb_flags &= ~SB_AUTOSIZE; else sowwakeup = 1; /* room available */ } if (sowwakeup) { if (!TAILQ_EMPTY(&toep->aiotx_jobq)) t4_aiotx_queue_toep(toep); sowwakeup_locked(so); } else SOCKBUF_UNLOCK(sb); SOCKBUF_UNLOCK_ASSERT(sb); /* nothing to send */ if (plen == 0) { KASSERT(m == NULL, ("%s: nothing to send, but m != NULL", __func__)); break; } if (__predict_false(toep->flags & TPF_FIN_SENT)) panic("%s: excess tx.", __func__); shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); if (plen <= max_imm && !aiotx_mbuf_seen) { /* Immediate data tx */ wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), toep->ofld_txq); if (wr == NULL) { /* XXX: how will we recover from this? */ toep->flags |= TPF_TX_SUSPENDED; return; } txwr = wrtod(wr); credits = howmany(wr->wr_len, 16); write_tx_wr(txwr, toep, plen, plen, credits, shove, 0, sc->tt.tx_align); m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); nsegs = 0; } else { int wr_len; /* DSGL tx */ wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); if (wr == NULL) { /* XXX: how will we recover from this? 
*/ toep->flags |= TPF_TX_SUSPENDED; return; } txwr = wrtod(wr); credits = howmany(wr_len, 16); write_tx_wr(txwr, toep, 0, plen, credits, shove, 0, sc->tt.tx_align); write_tx_sgl(txwr + 1, sndptr, m, nsegs, max_nsegs_1mbuf); if (wr_len & 0xf) { uint64_t *pad = (uint64_t *) ((uintptr_t)txwr + wr_len); *pad = 0; } } KASSERT(toep->tx_credits >= credits, ("%s: not enough credits", __func__)); toep->tx_credits -= credits; toep->tx_nocompl += credits; toep->plen_nocompl += plen; if (toep->tx_credits <= toep->tx_total * 3 / 8 && toep->tx_nocompl >= toep->tx_total / 4) compl = 1; if (compl || toep->ulp_mode == ULP_MODE_RDMA) { txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); toep->tx_nocompl = 0; toep->plen_nocompl = 0; } tp->snd_nxt += plen; tp->snd_max += plen; SOCKBUF_LOCK(sb); KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); sb->sb_sndptr = sb_sndptr; SOCKBUF_UNLOCK(sb); toep->flags |= TPF_TX_DATA_SENT; if (toep->tx_credits < MIN_OFLD_TX_CREDITS) toep->flags |= TPF_TX_SUSPENDED; KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); txsd->plen = plen; txsd->tx_credits = credits; txsd++; if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { toep->txsd_pidx = 0; txsd = &toep->txsd[0]; } toep->txsd_avail--; t4_l2t_send(sc, wr, toep->l2te); } while (m != NULL); /* Send a FIN if requested, but only if there's no more data to send */ if (m == NULL && toep->flags & TPF_SEND_FIN) t4_close_conn(sc, toep); } static inline void rqdrop_locked(struct mbufq *q, int plen) { struct mbuf *m; while (plen > 0) { m = mbufq_dequeue(q); /* Too many credits. */ MPASS(m != NULL); M_ASSERTPKTHDR(m); /* Partial credits. */ MPASS(plen >= m->m_pkthdr.len); plen -= m->m_pkthdr.len; m_freem(m); } } void t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop) { struct mbuf *sndptr, *m; struct fw_ofld_tx_data_wr *txwr; struct wrqe *wr; u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; u_int adjusted_plen, ulp_submode; struct inpcb *inp = toep->inp; struct tcpcb *tp = intotcpcb(inp); int tx_credits, shove; struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; struct mbufq *pduq = &toep->ulp_pduq; static const u_int ulp_extra_len[] = {0, 4, 4, 8}; INP_WLOCK_ASSERT(inp); KASSERT(toep->flags & TPF_FLOWC_WR_SENT, ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); KASSERT(toep->ulp_mode == ULP_MODE_ISCSI, ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) return; /* * This function doesn't resume by itself. Someone else must clear the * flag and call this function. */ if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { KASSERT(drop == 0, ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); return; } if (drop) rqdrop_locked(&toep->ulp_pdu_reclaimq, drop); while ((sndptr = mbufq_first(pduq)) != NULL) { M_ASSERTPKTHDR(sndptr); tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); max_imm = max_imm_payload(tx_credits); max_nsegs = max_dsgl_nsegs(tx_credits); plen = 0; nsegs = 0; max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ for (m = sndptr; m != NULL; m = m->m_next) { int n = sglist_count(mtod(m, void *), m->m_len); nsegs += n; plen += m->m_len; /* * This mbuf would send us _over_ the nsegs limit. * Suspend tx because the PDU can't be sent out. 
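 * Unlike t4_push_frames there is no partial tx here: a PDU must go out
 * in a single WR, so the only option is to wait for more credits.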
*/ if (plen > max_imm && nsegs > max_nsegs) { toep->flags |= TPF_TX_SUSPENDED; return; } if (max_nsegs_1mbuf < n) max_nsegs_1mbuf = n; } if (__predict_false(toep->flags & TPF_FIN_SENT)) panic("%s: excess tx.", __func__); /* * We have a PDU to send. All of it goes out in one WR so 'm' * is NULL. A PDU's length is always a multiple of 4. */ MPASS(m == NULL); MPASS((plen & 3) == 0); MPASS(sndptr->m_pkthdr.len == plen); shove = !(tp->t_flags & TF_MORETOCOME); ulp_submode = mbuf_ulp_submode(sndptr); MPASS(ulp_submode < nitems(ulp_extra_len)); /* * plen doesn't include header and data digests, which are * generated and inserted in the right places by the TOE, but * they do occupy TCP sequence space and need to be accounted * for. */ adjusted_plen = plen + ulp_extra_len[ulp_submode]; if (plen <= max_imm) { /* Immediate data tx */ wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), toep->ofld_txq); if (wr == NULL) { /* XXX: how will we recover from this? */ toep->flags |= TPF_TX_SUSPENDED; return; } txwr = wrtod(wr); credits = howmany(wr->wr_len, 16); write_tx_wr(txwr, toep, plen, adjusted_plen, credits, shove, ulp_submode, sc->tt.tx_align); m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); nsegs = 0; } else { int wr_len; /* DSGL tx */ wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); if (wr == NULL) { /* XXX: how will we recover from this? */ toep->flags |= TPF_TX_SUSPENDED; return; } txwr = wrtod(wr); credits = howmany(wr_len, 16); write_tx_wr(txwr, toep, 0, adjusted_plen, credits, shove, ulp_submode, sc->tt.tx_align); write_tx_sgl(txwr + 1, sndptr, m, nsegs, max_nsegs_1mbuf); if (wr_len & 0xf) { uint64_t *pad = (uint64_t *) ((uintptr_t)txwr + wr_len); *pad = 0; } } KASSERT(toep->tx_credits >= credits, ("%s: not enough credits", __func__)); m = mbufq_dequeue(pduq); MPASS(m == sndptr); mbufq_enqueue(&toep->ulp_pdu_reclaimq, m); toep->tx_credits -= credits; toep->tx_nocompl += credits; toep->plen_nocompl += plen; if (toep->tx_credits <= toep->tx_total * 3 / 8 && toep->tx_nocompl >= toep->tx_total / 4) { txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); toep->tx_nocompl = 0; toep->plen_nocompl = 0; } tp->snd_nxt += adjusted_plen; tp->snd_max += adjusted_plen; toep->flags |= TPF_TX_DATA_SENT; if (toep->tx_credits < MIN_OFLD_TX_CREDITS) toep->flags |= TPF_TX_SUSPENDED; KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); txsd->plen = plen; txsd->tx_credits = credits; txsd++; if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { toep->txsd_pidx = 0; txsd = &toep->txsd[0]; } toep->txsd_avail--; t4_l2t_send(sc, wr, toep->l2te); } /* Send a FIN if requested, but only if there are no more PDUs to send */ if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN) t4_close_conn(sc, toep); } int t4_tod_output(struct toedev *tod, struct tcpcb *tp) { struct adapter *sc = tod->tod_softc; #ifdef INVARIANTS struct inpcb *inp = tp->t_inpcb; #endif struct toepcb *toep = tp->t_toe; INP_WLOCK_ASSERT(inp); KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("%s: inp %p dropped.", __func__, inp)); KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); if (toep->ulp_mode == ULP_MODE_ISCSI) t4_push_pdus(sc, toep, 0); else if (tls_tx_key(toep)) t4_push_tls_records(sc, toep, 0); else t4_push_frames(sc, toep, 0); return (0); } int t4_send_fin(struct toedev *tod, struct tcpcb *tp) { struct adapter *sc = tod->tod_softc; #ifdef INVARIANTS struct inpcb *inp = tp->t_inpcb; #endif struct toepcb *toep = tp->t_toe; 
INP_WLOCK_ASSERT(inp); KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("%s: inp %p dropped.", __func__, inp)); KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); toep->flags |= TPF_SEND_FIN; if (tp->t_state >= TCPS_ESTABLISHED) { if (toep->ulp_mode == ULP_MODE_ISCSI) t4_push_pdus(sc, toep, 0); else if (tls_tx_key(toep)) t4_push_tls_records(sc, toep, 0); else t4_push_frames(sc, toep, 0); } return (0); } int t4_send_rst(struct toedev *tod, struct tcpcb *tp) { struct adapter *sc = tod->tod_softc; #if defined(INVARIANTS) struct inpcb *inp = tp->t_inpcb; #endif struct toepcb *toep = tp->t_toe; INP_WLOCK_ASSERT(inp); KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("%s: inp %p dropped.", __func__, inp)); KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); /* hmmmm */ KASSERT(toep->flags & TPF_FLOWC_WR_SENT, ("%s: flowc for tid %u [%s] not sent already", __func__, toep->tid, tcpstates[tp->t_state])); send_reset(sc, toep, 0); return (0); } /* * Peer has sent us a FIN. */ static int do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_peer_close *cpl = (const void *)(rss + 1); unsigned int tid = GET_TID(cpl); struct toepcb *toep = lookup_tid(sc, tid); struct inpcb *inp = toep->inp; struct tcpcb *tp = NULL; struct socket *so; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_PEER_CLOSE, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); if (__predict_false(toep->flags & TPF_SYNQE)) { #ifdef INVARIANTS struct synq_entry *synqe = (void *)toep; INP_WLOCK(synqe->lctx->inp); if (synqe->flags & TPF_SYNQE_HAS_L2TE) { KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, ("%s: listen socket closed but tid %u not aborted.", __func__, tid)); } else { /* * do_pass_accept_req is still running and will * eventually take care of this tid. */ } INP_WUNLOCK(synqe->lctx->inp); #endif CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, toep, toep->flags); return (0); } KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); CURVNET_SET(toep->vnet); INP_INFO_RLOCK(&V_tcbinfo); INP_WLOCK(inp); tp = intotcpcb(inp); CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp); if (toep->flags & TPF_ABORT_SHUTDOWN) goto done; tp->rcv_nxt++; /* FIN */ so = inp->inp_socket; if (toep->ulp_mode == ULP_MODE_TCPDDP) { DDP_LOCK(toep); if (__predict_false(toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) handle_ddp_close(toep, tp, cpl->rcv_nxt); DDP_UNLOCK(toep); } socantrcvmore(so); if (toep->ulp_mode != ULP_MODE_RDMA) { KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, be32toh(cpl->rcv_nxt))); } switch (tp->t_state) { case TCPS_SYN_RECEIVED: tp->t_starttime = ticks; /* FALLTHROUGH */ case TCPS_ESTABLISHED: tcp_state_change(tp, TCPS_CLOSE_WAIT); break; case TCPS_FIN_WAIT_1: tcp_state_change(tp, TCPS_CLOSING); break; case TCPS_FIN_WAIT_2: tcp_twstart(tp); INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); INP_WLOCK(inp); final_cpl_received(toep); return (0); default: log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", __func__, tid, tp->t_state); } done: INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); return (0); } /* * Peer has ACK'd our FIN. 
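 *
 * The handler below finishes the close from whichever state we were in:
 * CLOSING enters timewait, LAST_ACK tears down the pcb, and FIN_WAIT_1
 * moves to FIN_WAIT_2.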
*/ static int do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); unsigned int tid = GET_TID(cpl); struct toepcb *toep = lookup_tid(sc, tid); struct inpcb *inp = toep->inp; struct tcpcb *tp = NULL; struct socket *so = NULL; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_CLOSE_CON_RPL, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); CURVNET_SET(toep->vnet); INP_INFO_RLOCK(&V_tcbinfo); INP_WLOCK(inp); tp = intotcpcb(inp); CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); if (toep->flags & TPF_ABORT_SHUTDOWN) goto done; so = inp->inp_socket; tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ switch (tp->t_state) { case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ tcp_twstart(tp); release: INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); INP_WLOCK(inp); final_cpl_received(toep); /* no more CPLs expected */ return (0); case TCPS_LAST_ACK: if (tcp_close(tp)) INP_WUNLOCK(inp); goto release; case TCPS_FIN_WAIT_1: if (so->so_rcv.sb_state & SBS_CANTRCVMORE) soisdisconnected(so); tcp_state_change(tp, TCPS_FIN_WAIT_2); break; default: log(LOG_ERR, "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", __func__, tid, tcpstates[tp->t_state]); } done: INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); return (0); } void send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, int rst_status) { struct wrqe *wr; struct cpl_abort_rpl *cpl; wr = alloc_wrqe(sizeof(*cpl), ofld_txq); if (wr == NULL) { /* XXX */ panic("%s: allocation failure.", __func__); } cpl = wrtod(wr); INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); cpl->cmd = rst_status; t4_wrq_tx(sc, wr); } static int abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) { switch (abort_reason) { case CPL_ERR_BAD_SYN: case CPL_ERR_CONN_RESET: return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); case CPL_ERR_XMIT_TIMEDOUT: case CPL_ERR_PERSIST_TIMEDOUT: case CPL_ERR_FINWAIT2_TIMEDOUT: case CPL_ERR_KEEPALIVE_TIMEDOUT: return (ETIMEDOUT); default: return (EIO); } } /* * TCP RST from the peer, timeout, or some other such critical error. 
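 *
 * The errno handed to the socket comes from abort_status_to_errno()
 * above, e.g. CPL_ERR_XMIT_TIMEDOUT maps to ETIMEDOUT and
 * CPL_ERR_CONN_RESET to ECONNRESET (EPIPE if already in CLOSE_WAIT).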
*/ static int do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); unsigned int tid = GET_TID(cpl); struct toepcb *toep = lookup_tid(sc, tid); struct sge_wrq *ofld_txq = toep->ofld_txq; struct inpcb *inp; struct tcpcb *tp; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_ABORT_REQ_RSS, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); if (toep->flags & TPF_SYNQE) return (do_abort_req_synqe(iq, rss, m)); KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); if (negative_advice(cpl->status)) { CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", __func__, cpl->status, tid, toep->flags); return (0); /* Ignore negative advice */ } inp = toep->inp; CURVNET_SET(toep->vnet); INP_INFO_RLOCK(&V_tcbinfo); /* for tcp_close */ INP_WLOCK(inp); tp = intotcpcb(inp); CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp->inp_flags, cpl->status); /* * If we'd initiated an abort earlier the reply to it is responsible for * cleaning up resources. Otherwise we tear everything down right here * right now. We owe the T4 a CPL_ABORT_RPL no matter what. */ if (toep->flags & TPF_ABORT_SHUTDOWN) { INP_WUNLOCK(inp); goto done; } toep->flags |= TPF_ABORT_SHUTDOWN; if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { struct socket *so = inp->inp_socket; if (so != NULL) so_error_set(so, abort_status_to_errno(tp, cpl->status)); tp = tcp_close(tp); if (tp == NULL) INP_WLOCK(inp); /* re-acquire */ } final_cpl_received(toep); done: INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); return (0); } /* * Reply to the CPL_ABORT_REQ (send_reset) */ static int do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); unsigned int tid = GET_TID(cpl); struct toepcb *toep = lookup_tid(sc, tid); struct inpcb *inp = toep->inp; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_ABORT_RPL_RSS, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); if (toep->flags & TPF_SYNQE) return (do_abort_rpl_synqe(iq, rss, m)); KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", __func__, tid, toep, inp, cpl->status); KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, ("%s: wasn't expecting abort reply", __func__)); INP_WLOCK(inp); final_cpl_received(toep); return (0); } static int do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_rx_data *cpl = mtod(m, const void *); unsigned int tid = GET_TID(cpl); struct toepcb *toep = lookup_tid(sc, tid); struct inpcb *inp = toep->inp; struct tcpcb *tp; struct socket *so; struct sockbuf *sb; int len; uint32_t ddp_placed = 0; if (__predict_false(toep->flags & TPF_SYNQE)) { #ifdef INVARIANTS struct synq_entry *synqe = (void *)toep; INP_WLOCK(synqe->lctx->inp); if (synqe->flags & TPF_SYNQE_HAS_L2TE) { KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, ("%s: listen socket closed but tid %u not aborted.", __func__, tid)); } else { /* * 
do_pass_accept_req is still running and will * eventually take care of this tid. */ } INP_WUNLOCK(synqe->lctx->inp); #endif CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, toep, toep->flags); m_freem(m); return (0); } KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); /* strip off CPL header */ m_adj(m, sizeof(*cpl)); len = m->m_pkthdr.len; INP_WLOCK(inp); if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", __func__, tid, len, inp->inp_flags); INP_WUNLOCK(inp); m_freem(m); return (0); } tp = intotcpcb(inp); if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; tp->rcv_nxt += len; if (tp->rcv_wnd < len) { KASSERT(toep->ulp_mode == ULP_MODE_RDMA, ("%s: negative window size", __func__)); } tp->rcv_wnd -= len; tp->t_rcvtime = ticks; if (toep->ulp_mode == ULP_MODE_TCPDDP) DDP_LOCK(toep); so = inp_inpcbtosocket(inp); sb = &so->so_rcv; SOCKBUF_LOCK(sb); if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", __func__, tid, len); m_freem(m); SOCKBUF_UNLOCK(sb); if (toep->ulp_mode == ULP_MODE_TCPDDP) DDP_UNLOCK(toep); INP_WUNLOCK(inp); CURVNET_SET(toep->vnet); INP_INFO_RLOCK(&V_tcbinfo); INP_WLOCK(inp); tp = tcp_drop(tp, ECONNRESET); if (tp) INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); return (0); } /* receive buffer autosize */ MPASS(toep->vnet == so->so_vnet); CURVNET_SET(toep->vnet); if (sb->sb_flags & SB_AUTOSIZE && V_tcp_do_autorcvbuf && sb->sb_hiwat < V_tcp_autorcvbuf_max && len > (sbspace(sb) / 8 * 7)) { unsigned int hiwat = sb->sb_hiwat; unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, V_tcp_autorcvbuf_max); if (!sbreserve_locked(sb, newsize, so, NULL)) sb->sb_flags &= ~SB_AUTOSIZE; else toep->rx_credits += newsize - hiwat; } if (toep->ulp_mode == ULP_MODE_TCPDDP) { int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off; if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0) CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", __func__, tid, len); if (changed) { if (toep->ddp.flags & DDP_SC_REQ) toep->ddp.flags ^= DDP_ON | DDP_SC_REQ; else { KASSERT(cpl->ddp_off == 1, ("%s: DDP switched on by itself.", __func__)); /* Fell out of DDP mode */ toep->ddp.flags &= ~DDP_ON; CTR1(KTR_CXGBE, "%s: fell out of DDP mode", __func__); insert_ddp_data(toep, ddp_placed); } } if (toep->ddp.flags & DDP_ON) { /* * CPL_RX_DATA with DDP on can only be an indicate. * Start posting queued AIO requests via DDP. The * payload that arrived in this indicate is appended * to the socket buffer as usual. 
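 *
 * For example, if 4KB arrives as an indicate on a connection with a
 * 64KB receive window: rcv_nxt has already advanced by 4KB and rcv_wnd
 * shrunk by the same amount above, the 4KB lands in so_rcv here, and
 * handle_ddp_indicate() below posts the queued aio_read() buffers so
 * that subsequent payload is placed into them directly.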
*/ handle_ddp_indicate(toep); } } KASSERT(toep->sb_cc >= sbused(sb), ("%s: sb %p has more data (%d) than last time (%d).", __func__, sb, sbused(sb), toep->sb_cc)); toep->rx_credits += toep->sb_cc - sbused(sb); sbappendstream_locked(sb, m, 0); toep->sb_cc = sbused(sb); if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) { int credits; credits = send_rx_credits(sc, toep, toep->rx_credits); toep->rx_credits -= credits; tp->rcv_wnd += credits; tp->rcv_adv += credits; } if (toep->ulp_mode == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 && sbavail(sb) != 0) { CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__, tid); ddp_queue_toep(toep); } sorwakeup_locked(so); SOCKBUF_UNLOCK_ASSERT(sb); if (toep->ulp_mode == ULP_MODE_TCPDDP) DDP_UNLOCK(toep); INP_WUNLOCK(inp); CURVNET_RESTORE(); return (0); } static int do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); struct toepcb *toep = lookup_tid(sc, tid); struct inpcb *inp; struct tcpcb *tp; struct socket *so; uint8_t credits = cpl->credits; struct ofld_tx_sdesc *txsd; int plen; #ifdef INVARIANTS unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); #endif /* * Very unusual case: we'd sent a flowc + abort_req for a synq entry and * now this comes back carrying the credits for the flowc. */ if (__predict_false(toep->flags & TPF_SYNQE)) { KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, ("%s: credits for a synq entry %p", __func__, toep)); return (0); } inp = toep->inp; KASSERT(opcode == CPL_FW4_ACK, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); INP_WLOCK(inp); if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { INP_WUNLOCK(inp); return (0); } KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0, ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); tp = intotcpcb(inp); if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { tcp_seq snd_una = be32toh(cpl->snd_una); #ifdef INVARIANTS if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { log(LOG_ERR, "%s: unexpected seq# %x for TID %u, snd_una %x\n", __func__, snd_una, toep->tid, tp->snd_una); } #endif if (tp->snd_una != snd_una) { tp->snd_una = snd_una; tp->ts_recent_age = tcp_ts_getticks(); } } #ifdef VERBOSE_TRACES CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits); #endif so = inp->inp_socket; txsd = &toep->txsd[toep->txsd_cidx]; plen = 0; while (credits) { KASSERT(credits >= txsd->tx_credits, ("%s: too many (or partial) credits", __func__)); credits -= txsd->tx_credits; toep->tx_credits += txsd->tx_credits; plen += txsd->plen; if (txsd->iv_buffer) { free(txsd->iv_buffer, M_CXGBE); txsd->iv_buffer = NULL; } txsd++; toep->txsd_avail++; KASSERT(toep->txsd_avail <= toep->txsd_total, ("%s: txsd avail > total", __func__)); if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) { txsd = &toep->txsd[0]; toep->txsd_cidx = 0; } } if (toep->tx_credits == toep->tx_total) { toep->tx_nocompl = 0; toep->plen_nocompl = 0; } if (toep->flags & TPF_TX_SUSPENDED && toep->tx_credits >= toep->tx_total / 4) { #ifdef VERBOSE_TRACES CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__, tid); #endif toep->flags &= ~TPF_TX_SUSPENDED; CURVNET_SET(toep->vnet); if (toep->ulp_mode == ULP_MODE_ISCSI) t4_push_pdus(sc, toep, plen); else if (tls_tx_key(toep)) 
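		/* TLS offload transmits in record units, not raw stream bytes. */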
t4_push_tls_records(sc, toep, plen); else t4_push_frames(sc, toep, plen); CURVNET_RESTORE(); } else if (plen > 0) { struct sockbuf *sb = &so->so_snd; int sbu; SOCKBUF_LOCK(sb); sbu = sbused(sb); if (toep->ulp_mode == ULP_MODE_ISCSI) { if (__predict_false(sbu > 0)) { /* * The data transmitted before the tid's ULP mode * changed to ISCSI is still in so_snd. * Incoming credits should account for so_snd * first. */ sbdrop_locked(sb, min(sbu, plen)); plen -= min(sbu, plen); } sowwakeup_locked(so); /* unlocks so_snd */ rqdrop_locked(&toep->ulp_pdu_reclaimq, plen); } else { #ifdef VERBOSE_TRACES CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__, tid, plen); #endif sbdrop_locked(sb, plen); if (tls_tx_key(toep)) { struct tls_ofld_info *tls_ofld = &toep->tls; MPASS(tls_ofld->sb_off >= plen); tls_ofld->sb_off -= plen; } if (!TAILQ_EMPTY(&toep->aiotx_jobq)) t4_aiotx_queue_toep(toep); sowwakeup_locked(so); /* unlocks so_snd */ } SOCKBUF_UNLOCK_ASSERT(sb); } INP_WUNLOCK(inp); return (0); } void t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep, uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie) { struct wrqe *wr; struct cpl_set_tcb_field *req; struct ofld_tx_sdesc *txsd; MPASS((cookie & ~M_COOKIE) == 0); if (reply) { MPASS(cookie != CPL_COOKIE_RESERVED); } wr = alloc_wrqe(sizeof(*req), wrq); if (wr == NULL) { /* XXX */ panic("%s: allocation failure.", __func__); } req = wrtod(wr); INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid); req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id)); if (reply == 0) req->reply_ctrl |= htobe16(F_NO_REPLY); req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie)); req->mask = htobe64(mask); req->val = htobe64(val); if ((wrq->eq.flags & EQ_TYPEMASK) == EQ_OFLD) { txsd = &toep->txsd[toep->txsd_pidx]; txsd->tx_credits = howmany(sizeof(*req), 16); txsd->plen = 0; KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, ("%s: not enough credits (%d)", __func__, toep->tx_credits)); toep->tx_credits -= txsd->tx_credits; if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) toep->txsd_pidx = 0; toep->txsd_avail--; } t4_wrq_tx(sc, wr); } void t4_init_cpl_io_handlers(void) { t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close); t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl); t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req); t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl, CPL_COOKIE_TOM); t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); t4_register_shared_cpl_handler(CPL_FW4_ACK, do_fw4_ack, CPL_COOKIE_TOM); } void t4_uninit_cpl_io_handlers(void) { t4_register_cpl_handler(CPL_PEER_CLOSE, NULL); t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL); t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL); t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, NULL, CPL_COOKIE_TOM); t4_register_cpl_handler(CPL_RX_DATA, NULL); t4_register_shared_cpl_handler(CPL_FW4_ACK, NULL, CPL_COOKIE_TOM); } /* * Use the 'backend3' field in AIO jobs to store the amount of data * sent by the AIO job so far and the 'backend4' field to hold an * error that should be reported when the job is completed. 
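 * (A compiled-out sketch of the reference-counted completion that those
 * fields feed follows; the field aliases come after it.)
 */

#if 0
/*
 * Illustrative aside, not driver code: the completion discipline of
 * free_aiotx_buffer() below in miniature.  Whoever drops the last
 * reference reports the job's outcome exactly once, and a cancelled job
 * that already moved some data completes successfully with the short
 * byte count.
 */
#include <errno.h>
#include <stdio.h>

struct xbuf {
	int refs;	/* one per in-flight mbuf, plus the job's own */
	long sent;	/* aio_sent: bytes handed to the socket so far */
	int error;	/* aio_error: deferred completion error */
};

static void
xbuf_release(struct xbuf *b)
{
	if (--b->refs != 0)
		return;
	if (b->error == ECANCELED && b->sent != 0)
		b->error = 0;	/* partial success beats "cancelled" */
	if (b->error != 0)
		printf("failed: %d\n", b->error);
	else
		printf("completed: %ld bytes\n", b->sent);
}

int
main(void)
{
	struct xbuf b = { .refs = 2, .sent = 4096, .error = ECANCELED };

	xbuf_release(&b);	/* mbuf freed: refcount 2 -> 1 */
	xbuf_release(&b);	/* job's own reference: completes */
	return (0);
}
#endif

/*
 * The aliases for those spare fields: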
*/ #define aio_sent backend3 #define aio_error backend4 #define jobtotid(job) \ (((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid) static void free_aiotx_buffer(struct aiotx_buffer *ab) { struct kaiocb *job; long status; int error; if (refcount_release(&ab->refcount) == 0) return; job = ab->job; error = job->aio_error; status = job->aio_sent; vm_page_unhold_pages(ab->ps.pages, ab->ps.npages); free(ab, M_CXGBE); #ifdef VERBOSE_TRACES CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__, jobtotid(job), job, status, error); #endif if (error == ECANCELED && status != 0) error = 0; if (error == ECANCELED) aio_cancel(job); else if (error) aio_complete(job, -1, error); else aio_complete(job, status, 0); } static void t4_aiotx_mbuf_free(struct mbuf *m, void *buffer, void *arg) { struct aiotx_buffer *ab = buffer; #ifdef VERBOSE_TRACES CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__, m->m_len, jobtotid(ab->job)); #endif free_aiotx_buffer(ab); } /* * Hold the buffer backing an AIO request and return an AIO transmit * buffer. */ static int hold_aio(struct kaiocb *job) { struct aiotx_buffer *ab; struct vmspace *vm; vm_map_t map; vm_offset_t start, end, pgoff; int n; MPASS(job->backend1 == NULL); /* * The AIO subsystem will cancel and drain all requests before * permitting a process to exit or exec, so p_vmspace should * be stable here. */ vm = job->userproc->p_vmspace; map = &vm->vm_map; start = (uintptr_t)job->uaiocb.aio_buf; pgoff = start & PAGE_MASK; end = round_page(start + job->uaiocb.aio_nbytes); start = trunc_page(start); n = atop(end - start); ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK | M_ZERO); refcount_init(&ab->refcount, 1); ab->ps.pages = (vm_page_t *)(ab + 1); ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start, VM_PROT_WRITE, ab->ps.pages, n); if (ab->ps.npages < 0) { free(ab, M_CXGBE); return (EFAULT); } KASSERT(ab->ps.npages == n, ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n)); ab->ps.offset = pgoff; ab->ps.len = job->uaiocb.aio_nbytes; ab->job = job; job->backend1 = ab; #ifdef VERBOSE_TRACES CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d", __func__, jobtotid(job), &ab->ps, job, ab->ps.npages); #endif return (0); } static void t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job) { struct adapter *sc; struct sockbuf *sb; struct file *fp; struct aiotx_buffer *ab; struct inpcb *inp; struct tcpcb *tp; struct mbuf *m; int error; bool moretocome, sendmore; sc = td_adapter(toep->td); sb = &so->so_snd; SOCKBUF_UNLOCK(sb); fp = job->fd_file; ab = job->backend1; m = NULL; #ifdef MAC error = mac_socket_check_send(fp->f_cred, so); if (error != 0) goto out; #endif if (ab == NULL) { error = hold_aio(job); if (error != 0) goto out; ab = job->backend1; } /* Inline sosend_generic(). 
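 * The checks below (SBS_CANTSENDMORE, so_error, SS_ISCONNECTED, the
 * sb_lowat space test) mirror what sosend_generic() would have done
 * before copying the data in; here the pages are already wired by
 * hold_aio() so the data is appended as external mbufs instead.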
*/ job->msgsnd = 1; error = sblock(sb, SBL_WAIT); MPASS(error == 0); sendanother: m = m_get(M_WAITOK, MT_DATA); SOCKBUF_LOCK(sb); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { SOCKBUF_UNLOCK(sb); sbunlock(sb); if ((so->so_options & SO_NOSIGPIPE) == 0) { PROC_LOCK(job->userproc); kern_psignal(job->userproc, SIGPIPE); PROC_UNLOCK(job->userproc); } error = EPIPE; goto out; } if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(sb); sbunlock(sb); goto out; } if ((so->so_state & SS_ISCONNECTED) == 0) { SOCKBUF_UNLOCK(sb); sbunlock(sb); error = ENOTCONN; goto out; } if (sbspace(sb) < sb->sb_lowat) { MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO)); /* * Don't block if there is too little room in the socket * buffer. Instead, requeue the request. */ if (!aio_set_cancel_function(job, t4_aiotx_cancel)) { SOCKBUF_UNLOCK(sb); sbunlock(sb); error = ECANCELED; goto out; } TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list); SOCKBUF_UNLOCK(sb); sbunlock(sb); goto out; } /* * Write as much data as the socket permits, but no more than a * single sndbuf at a time. */ m->m_len = sbspace(sb); if (m->m_len > ab->ps.len - job->aio_sent) { m->m_len = ab->ps.len - job->aio_sent; moretocome = false; } else moretocome = true; if (m->m_len > sc->tt.sndbuf) { m->m_len = sc->tt.sndbuf; sendmore = true; } else sendmore = false; if (!TAILQ_EMPTY(&toep->aiotx_jobq)) moretocome = true; SOCKBUF_UNLOCK(sb); MPASS(m->m_len != 0); /* Inlined tcp_usr_send(). */ inp = toep->inp; INP_WLOCK(inp); if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { INP_WUNLOCK(inp); sbunlock(sb); error = ECONNRESET; goto out; } refcount_acquire(&ab->refcount); m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab, (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV); m->m_ext.ext_flags |= EXT_FLAG_AIOTX; job->aio_sent += m->m_len; sbappendstream(sb, m, 0); m = NULL; if (!(inp->inp_flags & INP_DROPPED)) { tp = intotcpcb(inp); if (moretocome) tp->t_flags |= TF_MORETOCOME; error = tp->t_fb->tfb_tcp_output(tp); if (moretocome) tp->t_flags &= ~TF_MORETOCOME; } INP_WUNLOCK(inp); if (sendmore) goto sendanother; sbunlock(sb); if (error) goto out; /* * If this is a blocking socket and the request has not * been fully completed, requeue it until the socket is ready * again. */ if (job->aio_sent < job->uaiocb.aio_nbytes && !(so->so_state & SS_NBIO)) { SOCKBUF_LOCK(sb); if (!aio_set_cancel_function(job, t4_aiotx_cancel)) { SOCKBUF_UNLOCK(sb); error = ECANCELED; goto out; } TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list); return; } /* * If the request will not be requeued, drop a reference on * the aiotx buffer. Any mbufs in flight should still * contain a reference, but this drops the reference that the * job owns while it is waiting to queue mbufs to the socket. 
*/ free_aiotx_buffer(ab); out: if (error) { if (ab != NULL) { job->aio_error = error; free_aiotx_buffer(ab); } else { MPASS(job->aio_sent == 0); aio_complete(job, -1, error); } } if (m != NULL) m_free(m); SOCKBUF_LOCK(sb); } static void t4_aiotx_task(void *context, int pending) { struct toepcb *toep = context; struct inpcb *inp = toep->inp; struct socket *so = inp->inp_socket; struct kaiocb *job; CURVNET_SET(toep->vnet); SOCKBUF_LOCK(&so->so_snd); while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) { job = TAILQ_FIRST(&toep->aiotx_jobq); TAILQ_REMOVE(&toep->aiotx_jobq, job, list); if (!aio_clear_cancel_function(job)) continue; t4_aiotx_process_job(toep, so, job); } toep->aiotx_task_active = false; SOCKBUF_UNLOCK(&so->so_snd); CURVNET_RESTORE(); free_toepcb(toep); } static void t4_aiotx_queue_toep(struct toepcb *toep) { SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd); #ifdef VERBOSE_TRACES CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s", __func__, toep->tid, toep->aiotx_task_active ? "true" : "false"); #endif if (toep->aiotx_task_active) return; toep->aiotx_task_active = true; hold_toepcb(toep); soaio_enqueue(&toep->aiotx_task); } static void t4_aiotx_cancel(struct kaiocb *job) { struct aiotx_buffer *ab; struct socket *so; struct sockbuf *sb; struct tcpcb *tp; struct toepcb *toep; so = job->fd_file->f_data; tp = so_sototcpcb(so); toep = tp->t_toe; MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE); sb = &so->so_snd; SOCKBUF_LOCK(sb); if (!aio_cancel_cleared(job)) TAILQ_REMOVE(&toep->aiotx_jobq, job, list); SOCKBUF_UNLOCK(sb); ab = job->backend1; if (ab != NULL) free_aiotx_buffer(ab); else aio_cancel(job); } int t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job) { struct tcpcb *tp = so_sototcpcb(so); struct toepcb *toep = tp->t_toe; struct adapter *sc = td_adapter(toep->td); /* This only handles writes. */ if (job->uaiocb.aio_lio_opcode != LIO_WRITE) return (EOPNOTSUPP); if (!sc->tt.tx_zcopy) return (EOPNOTSUPP); if (tls_tx_key(toep)) return (EOPNOTSUPP); SOCKBUF_LOCK(&so->so_snd); #ifdef VERBOSE_TRACES CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job); #endif if (!aio_set_cancel_function(job, t4_aiotx_cancel)) panic("new job was cancelled"); TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list); if (sowriteable(so)) t4_aiotx_queue_toep(toep); SOCKBUF_UNLOCK(&so->so_snd); return (0); } void aiotx_init_toep(struct toepcb *toep) { TAILQ_INIT(&toep->aiotx_jobq); TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep); } #endif Index: stable/11/sys/dev/cxgbe/tom/t4_listen.c =================================================================== --- stable/11/sys/dev/cxgbe/tom/t4_listen.c (revision 346966) +++ stable/11/sys/dev/cxgbe/tom/t4_listen.c (revision 346967) @@ -1,1719 +1,1719 @@ /*- * Copyright (c) 2012 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #ifdef TCP_OFFLOAD #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TCPSTATES #include #include #include #include #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "t4_clip.h" #include "tom/t4_tom_l2t.h" #include "tom/t4_tom.h" /* stid services */ static int alloc_stid(struct adapter *, struct listen_ctx *, int); static struct listen_ctx *lookup_stid(struct adapter *, int); static void free_stid(struct adapter *, struct listen_ctx *); /* lctx services */ static struct listen_ctx *alloc_lctx(struct adapter *, struct inpcb *, struct vi_info *); static int free_lctx(struct adapter *, struct listen_ctx *); static void hold_lctx(struct listen_ctx *); static void listen_hash_add(struct adapter *, struct listen_ctx *); static struct listen_ctx *listen_hash_find(struct adapter *, struct inpcb *); static struct listen_ctx *listen_hash_del(struct adapter *, struct inpcb *); static struct inpcb *release_lctx(struct adapter *, struct listen_ctx *); static inline void save_qids_in_mbuf(struct mbuf *, struct vi_info *, struct offload_settings *); static inline void get_qids_from_mbuf(struct mbuf *m, int *, int *); static void send_reset_synqe(struct toedev *, struct synq_entry *); static int alloc_stid(struct adapter *sc, struct listen_ctx *lctx, int isipv6) { struct tid_info *t = &sc->tids; u_int stid, n, f, mask; struct stid_region *sr = &lctx->stid_region; /* * An IPv6 server needs 2 naturally aligned stids (1 stid = 4 cells) in * the TCAM. The start of the stid region is properly aligned (the chip * requires each region to be 128-cell aligned). */ n = isipv6 ? 2 : 1; mask = n - 1; KASSERT((t->stid_base & mask) == 0 && (t->nstids & mask) == 0, ("%s: stid region (%u, %u) not properly aligned. n = %u", __func__, t->stid_base, t->nstids, n)); mtx_lock(&t->stid_lock); if (n > t->nstids - t->stids_in_use) { mtx_unlock(&t->stid_lock); return (-1); } if (t->nstids_free_head >= n) { /* * This allocation will definitely succeed because the region * starts at a good alignment and we just checked we have enough * stids free. 
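 *
 * Worked example (illustrative): an IPv6 listener needs n = 2, so
 * mask = 1.  With nstids_free_head = 7, f = 7 & 1 = 1, the head drops
 * to 7 - (2 + 1) = 4, and stids 4 and 5 are handed out, naturally
 * aligned, with the stray odd stid 6 accounted to this region as free.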
*/ f = t->nstids_free_head & mask; t->nstids_free_head -= n + f; stid = t->nstids_free_head; TAILQ_INSERT_HEAD(&t->stids, sr, link); } else { struct stid_region *s; stid = t->nstids_free_head; TAILQ_FOREACH(s, &t->stids, link) { stid += s->used + s->free; f = stid & mask; if (s->free >= n + f) { stid -= n + f; s->free -= n + f; TAILQ_INSERT_AFTER(&t->stids, s, sr, link); goto allocated; } } if (__predict_false(stid != t->nstids)) { panic("%s: stids TAILQ (%p) corrupt." " At %d instead of %d at the end of the queue.", __func__, &t->stids, stid, t->nstids); } mtx_unlock(&t->stid_lock); return (-1); } allocated: sr->used = n; sr->free = f; t->stids_in_use += n; t->stid_tab[stid] = lctx; mtx_unlock(&t->stid_lock); KASSERT(((stid + t->stid_base) & mask) == 0, ("%s: EDOOFUS.", __func__)); return (stid + t->stid_base); } static struct listen_ctx * lookup_stid(struct adapter *sc, int stid) { struct tid_info *t = &sc->tids; return (t->stid_tab[stid - t->stid_base]); } static void free_stid(struct adapter *sc, struct listen_ctx *lctx) { struct tid_info *t = &sc->tids; struct stid_region *sr = &lctx->stid_region; struct stid_region *s; KASSERT(sr->used > 0, ("%s: nonsense free (%d)", __func__, sr->used)); mtx_lock(&t->stid_lock); s = TAILQ_PREV(sr, stid_head, link); if (s != NULL) s->free += sr->used + sr->free; else t->nstids_free_head += sr->used + sr->free; KASSERT(t->stids_in_use >= sr->used, ("%s: stids_in_use (%u) < stids being freed (%u)", __func__, t->stids_in_use, sr->used)); t->stids_in_use -= sr->used; TAILQ_REMOVE(&t->stids, sr, link); mtx_unlock(&t->stid_lock); } static struct listen_ctx * alloc_lctx(struct adapter *sc, struct inpcb *inp, struct vi_info *vi) { struct listen_ctx *lctx; INP_WLOCK_ASSERT(inp); lctx = malloc(sizeof(struct listen_ctx), M_CXGBE, M_NOWAIT | M_ZERO); if (lctx == NULL) return (NULL); lctx->stid = alloc_stid(sc, lctx, inp->inp_vflag & INP_IPV6); if (lctx->stid < 0) { free(lctx, M_CXGBE); return (NULL); } if (inp->inp_vflag & INP_IPV6 && !IN6_ARE_ADDR_EQUAL(&in6addr_any, &inp->in6p_laddr)) { lctx->ce = t4_hold_lip(sc, &inp->in6p_laddr, NULL); if (lctx->ce == NULL) { free(lctx, M_CXGBE); return (NULL); } } lctx->ctrlq = &sc->sge.ctrlq[vi->pi->port_id]; lctx->ofld_rxq = &sc->sge.ofld_rxq[vi->first_ofld_rxq]; refcount_init(&lctx->refcount, 1); TAILQ_INIT(&lctx->synq); lctx->inp = inp; lctx->vnet = inp->inp_socket->so_vnet; in_pcbref(inp); return (lctx); } /* Don't call this directly, use release_lctx instead */ static int free_lctx(struct adapter *sc, struct listen_ctx *lctx) { struct inpcb *inp = lctx->inp; INP_WLOCK_ASSERT(inp); KASSERT(lctx->refcount == 0, ("%s: refcount %d", __func__, lctx->refcount)); KASSERT(TAILQ_EMPTY(&lctx->synq), ("%s: synq not empty.", __func__)); KASSERT(lctx->stid >= 0, ("%s: bad stid %d.", __func__, lctx->stid)); CTR4(KTR_CXGBE, "%s: stid %u, lctx %p, inp %p", __func__, lctx->stid, lctx, lctx->inp); if (lctx->ce) t4_release_lip(sc, lctx->ce); free_stid(sc, lctx); free(lctx, M_CXGBE); return (in_pcbrele_wlocked(inp)); } static void hold_lctx(struct listen_ctx *lctx) { refcount_acquire(&lctx->refcount); } static inline uint32_t listen_hashfn(void *key, u_long mask) { return (fnv_32_buf(&key, sizeof(key), FNV1_32_INIT) & mask); } /* * Add a listen_ctx entry to the listen hash table. 
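 * (A compiled-out sketch of the bucket computation follows; the hash
 * routines themselves come after it.)
 */

#if 0
/*
 * Illustrative aside, not driver code: listen_hashfn() above hashes the
 * bits of the inp pointer itself with 32-bit FNV-1 and masks the result
 * down to a bucket index.  A minimal userspace equivalent:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
fnv32_buf(const void *buf, size_t len, uint32_t hval)
{
	const unsigned char *s = buf;

	while (len-- != 0) {
		hval *= 0x01000193;	/* 32-bit FNV prime */
		hval ^= *s++;
	}
	return (hval);
}

int
main(void)
{
	int dummy;
	void *key = &dummy;		/* any pointer value will do */
	uint32_t mask = 128 - 1;	/* power-of-2 bucket count - 1 */

	/* Hash the pointer's bits, not what it points at. */
	printf("bucket %u\n", fnv32_buf(&key, sizeof(key),
	    0x811c9dc5 /* FNV1_32_INIT */) & mask);
	return (0);
}
#endif

/*
 * listen_hash_add, described above: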
*/ static void listen_hash_add(struct adapter *sc, struct listen_ctx *lctx) { struct tom_data *td = sc->tom_softc; int bucket = listen_hashfn(lctx->inp, td->listen_mask); mtx_lock(&td->lctx_hash_lock); LIST_INSERT_HEAD(&td->listen_hash[bucket], lctx, link); td->lctx_count++; mtx_unlock(&td->lctx_hash_lock); } /* * Look for the listening socket's context entry in the hash and return it. */ static struct listen_ctx * listen_hash_find(struct adapter *sc, struct inpcb *inp) { struct tom_data *td = sc->tom_softc; int bucket = listen_hashfn(inp, td->listen_mask); struct listen_ctx *lctx; mtx_lock(&td->lctx_hash_lock); LIST_FOREACH(lctx, &td->listen_hash[bucket], link) { if (lctx->inp == inp) break; } mtx_unlock(&td->lctx_hash_lock); return (lctx); } /* * Removes the listen_ctx structure for inp from the hash and returns it. */ static struct listen_ctx * listen_hash_del(struct adapter *sc, struct inpcb *inp) { struct tom_data *td = sc->tom_softc; int bucket = listen_hashfn(inp, td->listen_mask); struct listen_ctx *lctx, *l; mtx_lock(&td->lctx_hash_lock); LIST_FOREACH_SAFE(lctx, &td->listen_hash[bucket], link, l) { if (lctx->inp == inp) { LIST_REMOVE(lctx, link); td->lctx_count--; break; } } mtx_unlock(&td->lctx_hash_lock); return (lctx); } /* * Releases a hold on the lctx. Must be called with the listening socket's inp * locked. The inp may be freed by this function and it returns NULL to * indicate this. */ static struct inpcb * release_lctx(struct adapter *sc, struct listen_ctx *lctx) { struct inpcb *inp = lctx->inp; int inp_freed = 0; INP_WLOCK_ASSERT(inp); if (refcount_release(&lctx->refcount)) inp_freed = free_lctx(sc, lctx); return (inp_freed ? NULL : inp); } static void send_reset_synqe(struct toedev *tod, struct synq_entry *synqe) { struct adapter *sc = tod->tod_softc; struct mbuf *m = synqe->syn; struct ifnet *ifp = m->m_pkthdr.rcvif; struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx]; struct wrqe *wr; struct fw_flowc_wr *flowc; struct cpl_abort_req *req; int txqid, rxqid, flowclen; struct sge_wrq *ofld_txq; struct sge_ofld_rxq *ofld_rxq; const int nparams = 6; - unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN; + const u_int pfvf = sc->pf << S_FW_VIID_PFN; INP_WLOCK_ASSERT(synqe->lctx->inp); CTR5(KTR_CXGBE, "%s: synqe %p (0x%x), tid %d%s", __func__, synqe, synqe->flags, synqe->tid, synqe->flags & TPF_ABORT_SHUTDOWN ? " (abort already in progress)" : ""); if (synqe->flags & TPF_ABORT_SHUTDOWN) return; /* abort already in progress */ synqe->flags |= TPF_ABORT_SHUTDOWN; get_qids_from_mbuf(m, &txqid, &rxqid); ofld_txq = &sc->sge.ofld_txq[txqid]; ofld_rxq = &sc->sge.ofld_rxq[rxqid]; /* The wrqe will have two WRs - a flowc followed by an abort_req */ flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); wr = alloc_wrqe(roundup2(flowclen, EQ_ESIZE) + sizeof(*req), ofld_txq); if (wr == NULL) { /* XXX */ panic("%s: allocation failure.", __func__); } flowc = wrtod(wr); req = (void *)((caddr_t)flowc + roundup2(flowclen, EQ_ESIZE)); /* First the flowc ... 
*/ memset(flowc, 0, wr->wr_len); flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | V_FW_FLOWC_WR_NPARAMS(nparams)); flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | V_FW_WR_FLOWID(synqe->tid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; flowc->mnemval[0].val = htobe32(pfvf); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; flowc->mnemval[1].val = htobe32(pi->tx_chan); flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; flowc->mnemval[2].val = htobe32(pi->tx_chan); flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; flowc->mnemval[3].val = htobe32(ofld_rxq->iq.abs_id); flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF; flowc->mnemval[4].val = htobe32(512); flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS; flowc->mnemval[5].val = htobe32(512); synqe->flags |= TPF_FLOWC_WR_SENT; /* ... then ABORT request */ INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, synqe->tid); req->rsvd0 = 0; /* don't have a snd_nxt */ req->rsvd1 = 1; /* no data sent yet */ req->cmd = CPL_ABORT_SEND_RST; t4_l2t_send(sc, wr, e); } static int create_server(struct adapter *sc, struct listen_ctx *lctx) { struct wrqe *wr; struct cpl_pass_open_req *req; struct inpcb *inp = lctx->inp; wr = alloc_wrqe(sizeof(*req), lctx->ctrlq); if (wr == NULL) { log(LOG_ERR, "%s: allocation failure", __func__); return (ENOMEM); } req = wrtod(wr); INIT_TP_WR(req, 0); OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, lctx->stid)); req->local_port = inp->inp_lport; req->peer_port = 0; req->local_ip = inp->inp_laddr.s_addr; req->peer_ip = 0; req->opt0 = htobe64(V_TX_CHAN(lctx->ctrlq->eq.tx_chan)); req->opt1 = htobe64(V_CONN_POLICY(CPL_CONN_POLICY_ASK) | F_SYN_RSS_ENABLE | V_SYN_RSS_QUEUE(lctx->ofld_rxq->iq.abs_id)); t4_wrq_tx(sc, wr); return (0); } static int create_server6(struct adapter *sc, struct listen_ctx *lctx) { struct wrqe *wr; struct cpl_pass_open_req6 *req; struct inpcb *inp = lctx->inp; wr = alloc_wrqe(sizeof(*req), lctx->ctrlq); if (wr == NULL) { log(LOG_ERR, "%s: allocation failure", __func__); return (ENOMEM); } req = wrtod(wr); INIT_TP_WR(req, 0); OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, lctx->stid)); req->local_port = inp->inp_lport; req->peer_port = 0; req->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0]; req->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8]; req->peer_ip_hi = 0; req->peer_ip_lo = 0; req->opt0 = htobe64(V_TX_CHAN(lctx->ctrlq->eq.tx_chan)); req->opt1 = htobe64(V_CONN_POLICY(CPL_CONN_POLICY_ASK) | F_SYN_RSS_ENABLE | V_SYN_RSS_QUEUE(lctx->ofld_rxq->iq.abs_id)); t4_wrq_tx(sc, wr); return (0); } static int destroy_server(struct adapter *sc, struct listen_ctx *lctx) { struct wrqe *wr; struct cpl_close_listsvr_req *req; wr = alloc_wrqe(sizeof(*req), lctx->ctrlq); if (wr == NULL) { /* XXX */ panic("%s: allocation failure.", __func__); } req = wrtod(wr); INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, lctx->stid)); req->reply_ctrl = htobe16(lctx->ofld_rxq->iq.abs_id); req->rsvd = htobe16(0); t4_wrq_tx(sc, wr); return (0); } /* * Start a listening server by sending a passive open request to HW. * * Can't take adapter lock here and access to sc->flags, * sc->offload_map, if_capenable are all race prone. 
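 * (A compiled-out sketch of the loopback screen at the top of this
 * routine follows.)
 */

#if 0
/*
 * Illustrative aside, not driver code: t4_listen_start() below declines
 * to offload listeners bound to loopback addresses.  A userspace
 * rendering of that check, assuming the IN_LOOPBACK and
 * IN6_IS_ADDR_LOOPBACK macros are visible via <netinet/in.h>:
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdbool.h>

static bool
offloadable_laddr(bool ipv6, const struct in6_addr *a6, struct in_addr a4)
{
	if (ipv6)
		return (!IN6_IS_ADDR_LOOPBACK(a6));
	return (!IN_LOOPBACK(ntohl(a4.s_addr)));
}
#endif

/*
 * The entry point: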
*/ int t4_listen_start(struct toedev *tod, struct tcpcb *tp) { struct adapter *sc = tod->tod_softc; struct vi_info *vi; struct port_info *pi; struct inpcb *inp = tp->t_inpcb; struct listen_ctx *lctx; int i, rc, v; struct offload_settings settings; INP_WLOCK_ASSERT(inp); rw_rlock(&sc->policy_lock); settings = *lookup_offload_policy(sc, OPEN_TYPE_LISTEN, NULL, 0xffff, inp); rw_runlock(&sc->policy_lock); if (!settings.offload) return (0); /* Don't start a hardware listener for any loopback address. */ if (inp->inp_vflag & INP_IPV6 && IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr)) return (0); if (!(inp->inp_vflag & INP_IPV6) && IN_LOOPBACK(ntohl(inp->inp_laddr.s_addr))) return (0); #if 0 ADAPTER_LOCK(sc); if (IS_BUSY(sc)) { log(LOG_ERR, "%s: listen request ignored, %s is busy", __func__, device_get_nameunit(sc->dev)); goto done; } KASSERT(uld_active(sc, ULD_TOM), ("%s: TOM not initialized", __func__)); #endif /* * Find an initialized VI with IFCAP_TOE (4 or 6). We'll use the first * such VI's queues to send the passive open and receive the reply to * it. * * XXX: need a way to mark a port in use by offload. if_cxgbe should * then reject any attempt to bring down such a port (and maybe reject * attempts to disable IFCAP_TOE on that port too?). */ for_each_port(sc, i) { pi = sc->port[i]; for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE && vi->ifp->if_capenable & IFCAP_TOE) goto found; } } goto done; /* no port that's UP with IFCAP_TOE enabled */ found: if (listen_hash_find(sc, inp) != NULL) goto done; /* already setup */ lctx = alloc_lctx(sc, inp, vi); if (lctx == NULL) { log(LOG_ERR, "%s: listen request ignored, %s couldn't allocate lctx\n", __func__, device_get_nameunit(sc->dev)); goto done; } listen_hash_add(sc, lctx); CTR6(KTR_CXGBE, "%s: stid %u (%s), lctx %p, inp %p vflag 0x%x", __func__, lctx->stid, tcpstates[tp->t_state], lctx, inp, inp->inp_vflag); if (inp->inp_vflag & INP_IPV6) rc = create_server6(sc, lctx); else rc = create_server(sc, lctx); if (rc != 0) { log(LOG_ERR, "%s: %s failed to create hw listener: %d.\n", __func__, device_get_nameunit(sc->dev), rc); (void) listen_hash_del(sc, inp); inp = release_lctx(sc, lctx); /* can't be freed, host stack has a reference */ KASSERT(inp != NULL, ("%s: inp freed", __func__)); goto done; } lctx->flags |= LCTX_RPL_PENDING; done: #if 0 ADAPTER_UNLOCK(sc); #endif return (0); } int t4_listen_stop(struct toedev *tod, struct tcpcb *tp) { struct listen_ctx *lctx; struct adapter *sc = tod->tod_softc; struct inpcb *inp = tp->t_inpcb; struct synq_entry *synqe; INP_WLOCK_ASSERT(inp); lctx = listen_hash_del(sc, inp); if (lctx == NULL) return (ENOENT); /* no hardware listener for this inp */ CTR4(KTR_CXGBE, "%s: stid %u, lctx %p, flags %x", __func__, lctx->stid, lctx, lctx->flags); /* * If the reply to the PASS_OPEN is still pending we'll wait for it to * arrive and clean up when it does. */ if (lctx->flags & LCTX_RPL_PENDING) { KASSERT(TAILQ_EMPTY(&lctx->synq), ("%s: synq not empty.", __func__)); return (EINPROGRESS); } /* * The host stack will abort all the connections on the listening * socket's so_comp. It doesn't know about the connections on the synq * so we need to take care of those. 
*/ TAILQ_FOREACH(synqe, &lctx->synq, link) { if (synqe->flags & TPF_SYNQE_HAS_L2TE) send_reset_synqe(tod, synqe); } destroy_server(sc, lctx); return (0); } static inline void hold_synqe(struct synq_entry *synqe) { refcount_acquire(&synqe->refcnt); } static inline void release_synqe(struct synq_entry *synqe) { if (refcount_release(&synqe->refcnt)) { int needfree = synqe->flags & TPF_SYNQE_NEEDFREE; m_freem(synqe->syn); if (needfree) free(synqe, M_CXGBE); } } void t4_syncache_added(struct toedev *tod __unused, void *arg) { struct synq_entry *synqe = arg; hold_synqe(synqe); } void t4_syncache_removed(struct toedev *tod __unused, void *arg) { struct synq_entry *synqe = arg; release_synqe(synqe); } int t4_syncache_respond(struct toedev *tod, void *arg, struct mbuf *m) { struct adapter *sc = tod->tod_softc; struct synq_entry *synqe = arg; struct wrqe *wr; struct l2t_entry *e; struct tcpopt to; struct ip *ip = mtod(m, struct ip *); struct tcphdr *th; wr = (struct wrqe *)atomic_readandclear_ptr(&synqe->wr); if (wr == NULL) { m_freem(m); return (EALREADY); } if (ip->ip_v == IPVERSION) th = (void *)(ip + 1); else th = (void *)((struct ip6_hdr *)ip + 1); bzero(&to, sizeof(to)); tcp_dooptions(&to, (void *)(th + 1), (th->th_off << 2) - sizeof(*th), TO_SYN); /* save these for later */ synqe->iss = be32toh(th->th_seq); synqe->ts = to.to_tsval; if (chip_id(sc) >= CHELSIO_T5) { struct cpl_t5_pass_accept_rpl *rpl5 = wrtod(wr); rpl5->iss = th->th_seq; } e = &sc->l2t->l2tab[synqe->l2e_idx]; t4_l2t_send(sc, wr, e); m_freem(m); /* don't need this any more */ return (0); } static int do_pass_open_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_pass_open_rpl *cpl = (const void *)(rss + 1); int stid = GET_TID(cpl); unsigned int status = cpl->status; struct listen_ctx *lctx = lookup_stid(sc, stid); struct inpcb *inp = lctx->inp; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_PASS_OPEN_RPL, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); KASSERT(lctx->stid == stid, ("%s: lctx stid mismatch", __func__)); INP_WLOCK(inp); CTR4(KTR_CXGBE, "%s: stid %d, status %u, flags 0x%x", __func__, stid, status, lctx->flags); lctx->flags &= ~LCTX_RPL_PENDING; if (status != CPL_ERR_NONE) log(LOG_ERR, "listener (stid %u) failed: %d\n", stid, status); #ifdef INVARIANTS /* * If the inp has been dropped (listening socket closed) then * listen_stop must have run and taken the inp out of the hash. */ if (inp->inp_flags & INP_DROPPED) { KASSERT(listen_hash_del(sc, inp) == NULL, ("%s: inp %p still in listen hash", __func__, inp)); } #endif if (inp->inp_flags & INP_DROPPED && status != CPL_ERR_NONE) { if (release_lctx(sc, lctx) != NULL) INP_WUNLOCK(inp); return (status); } /* * Listening socket stopped listening earlier and now the chip tells us * it has started the hardware listener. Stop it; the lctx will be * released in do_close_server_rpl. */ if (inp->inp_flags & INP_DROPPED) { destroy_server(sc, lctx); INP_WUNLOCK(inp); return (status); } /* * Failed to start hardware listener. Take inp out of the hash and * release our reference on it. An error message has been logged * already. 
*/ if (status != CPL_ERR_NONE) { listen_hash_del(sc, inp); if (release_lctx(sc, lctx) != NULL) INP_WUNLOCK(inp); return (status); } /* hardware listener open for business */ INP_WUNLOCK(inp); return (status); } static int do_close_server_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_close_listsvr_rpl *cpl = (const void *)(rss + 1); int stid = GET_TID(cpl); unsigned int status = cpl->status; struct listen_ctx *lctx = lookup_stid(sc, stid); struct inpcb *inp = lctx->inp; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_CLOSE_LISTSRV_RPL, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); KASSERT(lctx->stid == stid, ("%s: lctx stid mismatch", __func__)); CTR3(KTR_CXGBE, "%s: stid %u, status %u", __func__, stid, status); if (status != CPL_ERR_NONE) { log(LOG_ERR, "%s: failed (%u) to close listener for stid %u\n", __func__, status, stid); return (status); } INP_WLOCK(inp); inp = release_lctx(sc, lctx); if (inp != NULL) INP_WUNLOCK(inp); return (status); } static void done_with_synqe(struct adapter *sc, struct synq_entry *synqe) { struct listen_ctx *lctx = synqe->lctx; struct inpcb *inp = lctx->inp; struct vi_info *vi = synqe->syn->m_pkthdr.rcvif->if_softc; struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx]; int ntids; INP_WLOCK_ASSERT(inp); ntids = inp->inp_vflag & INP_IPV6 ? 2 : 1; TAILQ_REMOVE(&lctx->synq, synqe, link); inp = release_lctx(sc, lctx); if (inp) INP_WUNLOCK(inp); remove_tid(sc, synqe->tid, ntids); release_tid(sc, synqe->tid, &sc->sge.ctrlq[vi->pi->port_id]); t4_l2t_release(e); release_synqe(synqe); /* removed from synq list */ } int do_abort_req_synqe(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); unsigned int tid = GET_TID(cpl); struct synq_entry *synqe = lookup_tid(sc, tid); struct listen_ctx *lctx = synqe->lctx; struct inpcb *inp = lctx->inp; int txqid; struct sge_wrq *ofld_txq; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_ABORT_REQ_RSS, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); KASSERT(synqe->tid == tid, ("%s: toep tid mismatch", __func__)); CTR6(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x), lctx %p, status %d", __func__, tid, synqe, synqe->flags, synqe->lctx, cpl->status); if (negative_advice(cpl->status)) return (0); /* Ignore negative advice */ INP_WLOCK(inp); get_qids_from_mbuf(synqe->syn, &txqid, NULL); ofld_txq = &sc->sge.ofld_txq[txqid]; /* * If we'd initiated an abort earlier the reply to it is responsible for * cleaning up resources. Otherwise we tear everything down right here * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 
*/ if (synqe->flags & TPF_ABORT_SHUTDOWN) { INP_WUNLOCK(inp); goto done; } done_with_synqe(sc, synqe); /* inp lock released by done_with_synqe */ done: send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); return (0); } int do_abort_rpl_synqe(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); unsigned int tid = GET_TID(cpl); struct synq_entry *synqe = lookup_tid(sc, tid); struct listen_ctx *lctx = synqe->lctx; struct inpcb *inp = lctx->inp; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_ABORT_RPL_RSS, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); KASSERT(synqe->tid == tid, ("%s: toep tid mismatch", __func__)); CTR6(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x), lctx %p, status %d", __func__, tid, synqe, synqe->flags, synqe->lctx, cpl->status); INP_WLOCK(inp); KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, ("%s: wasn't expecting abort reply for synqe %p (0x%x)", __func__, synqe, synqe->flags)); done_with_synqe(sc, synqe); /* inp lock released by done_with_synqe */ return (0); } void t4_offload_socket(struct toedev *tod, void *arg, struct socket *so) { struct adapter *sc = tod->tod_softc; struct synq_entry *synqe = arg; #ifdef INVARIANTS struct inpcb *inp = sotoinpcb(so); #endif struct cpl_pass_establish *cpl = mtod(synqe->syn, void *); struct toepcb *toep = *(struct toepcb **)(cpl + 1); INP_INFO_RLOCK_ASSERT(&V_tcbinfo); /* prevents bad race with accept() */ INP_WLOCK_ASSERT(inp); KASSERT(synqe->flags & TPF_SYNQE, ("%s: %p not a synq_entry?", __func__, arg)); offload_socket(so, toep); make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt); toep->flags |= TPF_CPL_PENDING; update_tid(sc, synqe->tid, toep); synqe->flags |= TPF_SYNQE_EXPANDED; } static inline void save_qids_in_mbuf(struct mbuf *m, struct vi_info *vi, struct offload_settings *s) { uint32_t txqid, rxqid; if (s->txq >= 0 && s->txq < vi->nofldtxq) txqid = s->txq; else txqid = arc4random() % vi->nofldtxq; txqid += vi->first_ofld_txq; if (s->rxq >= 0 && s->rxq < vi->nofldrxq) rxqid = s->rxq; else rxqid = arc4random() % vi->nofldrxq; rxqid += vi->first_ofld_rxq; m->m_pkthdr.flowid = (txqid << 16) | (rxqid & 0xffff); } static inline void get_qids_from_mbuf(struct mbuf *m, int *txqid, int *rxqid) { if (txqid) *txqid = m->m_pkthdr.flowid >> 16; if (rxqid) *rxqid = m->m_pkthdr.flowid & 0xffff; } /* * Use the trailing space in the mbuf in which the PASS_ACCEPT_REQ arrived to * store some state temporarily. */ static struct synq_entry * mbuf_to_synqe(struct mbuf *m) { int len = roundup2(sizeof (struct synq_entry), 8); int tspace = M_TRAILINGSPACE(m); struct synq_entry *synqe = NULL; if (tspace < len) { synqe = malloc(sizeof(*synqe), M_CXGBE, M_NOWAIT); if (synqe == NULL) return (NULL); synqe->flags = TPF_SYNQE | TPF_SYNQE_NEEDFREE; } else { synqe = (void *)(m->m_data + m->m_len + tspace - len); synqe->flags = TPF_SYNQE; } return (synqe); } static void t4opt_to_tcpopt(const struct tcp_options *t4opt, struct tcpopt *to) { bzero(to, sizeof(*to)); if (t4opt->mss) { to->to_flags |= TOF_MSS; to->to_mss = be16toh(t4opt->mss); } if (t4opt->wsf) { to->to_flags |= TOF_SCALE; to->to_wscale = t4opt->wsf; } if (t4opt->tstamp) to->to_flags |= TOF_TS; if (t4opt->sack) to->to_flags |= TOF_SACKPERM; } /* * Options2 for passive open. 
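 * (First, a compiled-out sketch of the flowid round-trip used by
 * save_qids_in_mbuf()/get_qids_from_mbuf() above.)
 */

#if 0
/*
 * Illustrative aside, not driver code: the chosen offload tx and rx
 * queue ids ride along in the SYN mbuf's 32-bit flowid, tx in the high
 * half and rx in the low half.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	int txqid = 3, rxqid = 7;
	uint32_t flowid = ((uint32_t)txqid << 16) | (rxqid & 0xffff);

	assert(flowid >> 16 == 3);		/* txqid back out */
	assert((flowid & 0xffff) == 7);		/* rxqid back out */
	return (0);
}
#endif

/*
 * calc_opt2p builds the options2 word: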
*/ static uint32_t calc_opt2p(struct adapter *sc, struct port_info *pi, int rxqid, const struct tcp_options *tcpopt, struct tcphdr *th, int ulp_mode, struct cc_algo *cc, const struct offload_settings *s) { struct sge_ofld_rxq *ofld_rxq = &sc->sge.ofld_rxq[rxqid]; uint32_t opt2 = 0; /* * rx flow control, rx coalesce, congestion control, and tx pace are all * explicitly set by the driver. On T5+ the ISS is also set by the * driver to the value picked by the kernel. */ if (is_t4(sc)) { opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID; opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID; } else { opt2 |= F_T5_OPT_2_VALID; /* all 4 valid */ opt2 |= F_T5_ISS; /* ISS provided in CPL */ } if (tcpopt->sack && (s->sack > 0 || (s->sack < 0 && V_tcp_do_rfc1323))) opt2 |= F_SACK_EN; if (tcpopt->tstamp && (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323))) opt2 |= F_TSTAMPS_EN; if (tcpopt->wsf < 15 && V_tcp_do_rfc1323) opt2 |= F_WND_SCALE_EN; if (th->th_flags & (TH_ECE | TH_CWR) && (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn))) opt2 |= F_CCTRL_ECN; /* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */ opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]); /* These defaults are subject to ULP specific fixups later. */ opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0); opt2 |= V_PACE(0); if (s->cong_algo >= 0) opt2 |= V_CONG_CNTRL(s->cong_algo); else if (sc->tt.cong_algorithm >= 0) opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL); else { if (strcasecmp(cc->name, "reno") == 0) opt2 |= V_CONG_CNTRL(CONG_ALG_RENO); else if (strcasecmp(cc->name, "tahoe") == 0) opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); else if (strcasecmp(cc->name, "newreno") == 0) opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO); else if (strcasecmp(cc->name, "highspeed") == 0) opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED); else { /* * Use newreno in case the algorithm selected by the * host stack is not supported by the hardware. */ opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO); } } if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce)) opt2 |= V_RX_COALESCE(M_RX_COALESCE); /* Note that ofld_rxq is already set according to s->rxq. 
*/ opt2 |= F_RSS_QUEUE_VALID; opt2 |= V_RSS_QUEUE(ofld_rxq->iq.abs_id); #ifdef USE_DDP_RX_FLOW_CONTROL if (ulp_mode == ULP_MODE_TCPDDP) opt2 |= F_RX_FC_DDP; #endif if (ulp_mode == ULP_MODE_TLS) { opt2 &= ~V_RX_COALESCE(M_RX_COALESCE); opt2 |= F_RX_FC_DISABLE; } return (htobe32(opt2)); } static void pass_accept_req_to_protohdrs(struct adapter *sc, const struct mbuf *m, struct in_conninfo *inc, struct tcphdr *th) { const struct cpl_pass_accept_req *cpl = mtod(m, const void *); const struct ether_header *eh; unsigned int hlen = be32toh(cpl->hdr_len); uintptr_t l3hdr; const struct tcphdr *tcp; eh = (const void *)(cpl + 1); if (chip_id(sc) >= CHELSIO_T6) { l3hdr = ((uintptr_t)eh + G_T6_ETH_HDR_LEN(hlen)); tcp = (const void *)(l3hdr + G_T6_IP_HDR_LEN(hlen)); } else { l3hdr = ((uintptr_t)eh + G_ETH_HDR_LEN(hlen)); tcp = (const void *)(l3hdr + G_IP_HDR_LEN(hlen)); } if (inc) { bzero(inc, sizeof(*inc)); inc->inc_fport = tcp->th_sport; inc->inc_lport = tcp->th_dport; if (((struct ip *)l3hdr)->ip_v == IPVERSION) { const struct ip *ip = (const void *)l3hdr; inc->inc_faddr = ip->ip_src; inc->inc_laddr = ip->ip_dst; } else { const struct ip6_hdr *ip6 = (const void *)l3hdr; inc->inc_flags |= INC_ISIPV6; inc->inc6_faddr = ip6->ip6_src; inc->inc6_laddr = ip6->ip6_dst; } } if (th) { bcopy(tcp, th, sizeof(*th)); tcp_fields_to_host(th); /* just like tcp_input */ } } static struct l2t_entry * get_l2te_for_nexthop(struct port_info *pi, struct ifnet *ifp, struct in_conninfo *inc) { struct l2t_entry *e; struct sockaddr_in6 sin6; struct sockaddr *dst = (void *)&sin6; if (inc->inc_flags & INC_ISIPV6) { struct nhop6_basic nh6; bzero(dst, sizeof(struct sockaddr_in6)); dst->sa_len = sizeof(struct sockaddr_in6); dst->sa_family = AF_INET6; if (IN6_IS_ADDR_LINKLOCAL(&inc->inc6_laddr)) { /* no need for route lookup */ e = t4_l2t_get(pi, ifp, dst); return (e); } if (fib6_lookup_nh_basic(RT_DEFAULT_FIB, &inc->inc6_faddr, 0, 0, 0, &nh6) != 0) return (NULL); if (nh6.nh_ifp != ifp) return (NULL); ((struct sockaddr_in6 *)dst)->sin6_addr = nh6.nh_addr; } else { struct nhop4_basic nh4; dst->sa_len = sizeof(struct sockaddr_in); dst->sa_family = AF_INET; if (fib4_lookup_nh_basic(RT_DEFAULT_FIB, inc->inc_faddr, 0, 0, &nh4) != 0) return (NULL); if (nh4.nh_ifp != ifp) return (NULL); ((struct sockaddr_in *)dst)->sin_addr = nh4.nh_addr; } e = t4_l2t_get(pi, ifp, dst); return (e); } #define REJECT_PASS_ACCEPT() do { \ reject_reason = __LINE__; \ goto reject; \ } while (0) /* * The context associated with a tid entry via insert_tid could be a synq_entry * or a toepcb. The only way CPL handlers can tell is via a bit in these flags. */ CTASSERT(offsetof(struct toepcb, flags) == offsetof(struct synq_entry, flags)); /* * Incoming SYN on a listening socket. * * XXX: Every use of ifp in this routine has a bad race with up/down, toe/-toe, * etc. 
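 * (A compiled-out miniature of the REJECT_PASS_ACCEPT() idiom follows.)
 */

#if 0
/*
 * Illustrative aside, not driver code: recording __LINE__ as the reject
 * reason makes every bail-out site in a long screening routine uniquely
 * identifiable without maintaining a table of error codes.
 */
#include <stdio.h>

#define REJECT() do { reason = __LINE__; goto reject; } while (0)

static int
screen(int vlan_ok, int addr_ok)
{
	int reason = 0;

	if (!vlan_ok)
		REJECT();
	if (!addr_ok)
		REJECT();
	return (0);
reject:
	printf("rejected at line %d\n", reason);
	return (reason);
}
#endif

/*
 * The SYN handler itself: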
*/ static int do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; struct toedev *tod; const struct cpl_pass_accept_req *cpl = mtod(m, const void *); struct cpl_pass_accept_rpl *rpl; struct wrqe *wr; unsigned int stid = G_PASS_OPEN_TID(be32toh(cpl->tos_stid)); unsigned int tid = GET_TID(cpl); struct listen_ctx *lctx = lookup_stid(sc, stid); struct inpcb *inp; struct socket *so; struct in_conninfo inc; struct tcphdr th; struct tcpopt to; struct port_info *pi; struct vi_info *vi; struct ifnet *hw_ifp, *ifp; struct l2t_entry *e = NULL; int rscale, mtu_idx, rx_credits, rxqid, ulp_mode; struct synq_entry *synqe = NULL; int reject_reason, v, ntids; uint16_t vid; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif struct offload_settings settings; KASSERT(opcode == CPL_PASS_ACCEPT_REQ, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(lctx->stid == stid, ("%s: lctx stid mismatch", __func__)); CTR4(KTR_CXGBE, "%s: stid %u, tid %u, lctx %p", __func__, stid, tid, lctx); pass_accept_req_to_protohdrs(sc, m, &inc, &th); t4opt_to_tcpopt(&cpl->tcpopt, &to); pi = sc->port[G_SYN_INTF(be16toh(cpl->l2info))]; CURVNET_SET(lctx->vnet); /* * Use the MAC index to lookup the associated VI. If this SYN * didn't match a perfect MAC filter, punt. */ if (!(be16toh(cpl->l2info) & F_SYN_XACT_MATCH)) { m_freem(m); m = NULL; REJECT_PASS_ACCEPT(); } for_each_vi(pi, v, vi) { if (vi->xact_addr_filt == G_SYN_MAC_IDX(be16toh(cpl->l2info))) goto found; } m_freem(m); m = NULL; REJECT_PASS_ACCEPT(); found: hw_ifp = vi->ifp; /* the (v)cxgbeX ifnet */ m->m_pkthdr.rcvif = hw_ifp; tod = TOEDEV(hw_ifp); /* * Figure out if there is a pseudo interface (vlan, lagg, etc.) * involved. Don't offload if the SYN had a VLAN tag and the vid * doesn't match anything on this interface. * * XXX: lagg support, lagg + vlan support. */ vid = EVL_VLANOFTAG(be16toh(cpl->vlan)); if (vid != 0xfff) { ifp = VLAN_DEVAT(hw_ifp, vid); if (ifp == NULL) REJECT_PASS_ACCEPT(); } else ifp = hw_ifp; /* * Don't offload if the peer requested a TCP option that's not known to * the silicon. */ if (cpl->tcpopt.unknown) REJECT_PASS_ACCEPT(); if (inc.inc_flags & INC_ISIPV6) { /* Don't offload if the ifcap isn't enabled */ if ((ifp->if_capenable & IFCAP_TOE6) == 0) REJECT_PASS_ACCEPT(); /* * SYN must be directed to an IP6 address on this ifnet. This * is more restrictive than in6_localip. */ if (!in6_ifhasaddr(ifp, &inc.inc6_laddr)) REJECT_PASS_ACCEPT(); ntids = 2; } else { /* Don't offload if the ifcap isn't enabled */ if ((ifp->if_capenable & IFCAP_TOE4) == 0) REJECT_PASS_ACCEPT(); /* * SYN must be directed to an IP address on this ifnet. This * is more restrictive than in_localip. */ if (!in_ifhasaddr(ifp, inc.inc_laddr)) REJECT_PASS_ACCEPT(); ntids = 1; } /* * Don't offload if the ifnet that the SYN came in on is not in the same * vnet as the listening socket. */ if (lctx->vnet != ifp->if_vnet) REJECT_PASS_ACCEPT(); e = get_l2te_for_nexthop(pi, ifp, &inc); if (e == NULL) REJECT_PASS_ACCEPT(); synqe = mbuf_to_synqe(m); if (synqe == NULL) REJECT_PASS_ACCEPT(); wr = alloc_wrqe(is_t4(sc) ? 
sizeof(struct cpl_pass_accept_rpl) : sizeof(struct cpl_t5_pass_accept_rpl), &sc->sge.ctrlq[pi->port_id]); if (wr == NULL) REJECT_PASS_ACCEPT(); rpl = wrtod(wr); INP_INFO_RLOCK(&V_tcbinfo); /* for 4-tuple check */ /* Don't offload if the 4-tuple is already in use */ if (toe_4tuple_check(&inc, &th, ifp) != 0) { INP_INFO_RUNLOCK(&V_tcbinfo); free(wr, M_CXGBE); REJECT_PASS_ACCEPT(); } INP_INFO_RUNLOCK(&V_tcbinfo); inp = lctx->inp; /* listening socket, not owned by TOE */ INP_WLOCK(inp); /* Don't offload if the listening socket has closed */ if (__predict_false(inp->inp_flags & INP_DROPPED)) { /* * The listening socket has closed. The reply from the TOE to * our CPL_CLOSE_LISTSRV_REQ will ultimately release all * resources tied to this listen context. */ INP_WUNLOCK(inp); free(wr, M_CXGBE); REJECT_PASS_ACCEPT(); } so = inp->inp_socket; rw_rlock(&sc->policy_lock); settings = *lookup_offload_policy(sc, OPEN_TYPE_PASSIVE, m, 0xffff, inp); rw_runlock(&sc->policy_lock); if (!settings.offload) { INP_WUNLOCK(inp); free(wr, M_CXGBE); REJECT_PASS_ACCEPT(); } mtu_idx = find_best_mtu_idx(sc, &inc, &settings); rscale = cpl->tcpopt.wsf && V_tcp_do_rfc1323 ? select_rcv_wscale() : 0; SOCKBUF_LOCK(&so->so_rcv); /* opt0 rcv_bufsiz initially, assumes its normal meaning later */ rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ); SOCKBUF_UNLOCK(&so->so_rcv); save_qids_in_mbuf(m, vi, &settings); get_qids_from_mbuf(m, NULL, &rxqid); if (is_t4(sc)) INIT_TP_WR_MIT_CPL(rpl, CPL_PASS_ACCEPT_RPL, tid); else { struct cpl_t5_pass_accept_rpl *rpl5 = (void *)rpl; INIT_TP_WR_MIT_CPL(rpl5, CPL_PASS_ACCEPT_RPL, tid); } ulp_mode = select_ulp_mode(so, sc, &settings); switch (ulp_mode) { case ULP_MODE_TCPDDP: synqe->flags |= TPF_SYNQE_TCPDDP; break; case ULP_MODE_TLS: synqe->flags |= TPF_SYNQE_TLS; break; } rpl->opt0 = calc_opt0(so, vi, e, mtu_idx, rscale, rx_credits, ulp_mode, &settings); rpl->opt2 = calc_opt2p(sc, pi, rxqid, &cpl->tcpopt, &th, ulp_mode, CC_ALGO(intotcpcb(inp)), &settings); synqe->tid = tid; synqe->lctx = lctx; synqe->syn = m; m = NULL; refcount_init(&synqe->refcnt, 1); /* 1 means extra hold */ synqe->l2e_idx = e->idx; synqe->rcv_bufsize = rx_credits; atomic_store_rel_ptr(&synqe->wr, (uintptr_t)wr); insert_tid(sc, tid, synqe, ntids); TAILQ_INSERT_TAIL(&lctx->synq, synqe, link); hold_synqe(synqe); /* hold for the duration it's in the synq */ hold_lctx(lctx); /* A synqe on the list has a ref on its lctx */ /* * If all goes well t4_syncache_respond will get called during * syncache_add. Note that syncache_add releases the pcb lock. */ toe_syncache_add(&inc, &to, &th, inp, tod, synqe); INP_UNLOCK_ASSERT(inp); /* ok to assert, we have a ref on the inp */ /* * If we replied during syncache_add (synqe->wr has been consumed), * good. Otherwise, set it to 0 so that further syncache_respond * attempts by the kernel will be ignored. */ if (atomic_cmpset_ptr(&synqe->wr, (uintptr_t)wr, 0)) { /* * syncache may or may not have a hold on the synqe, which may * or may not be stashed in the original SYN mbuf passed to us. * Just copy it over instead of dealing with all possibilities. */ m = m_dup(synqe->syn, M_NOWAIT); if (m) m->m_pkthdr.rcvif = hw_ifp; remove_tid(sc, synqe->tid, ntids); free(wr, M_CXGBE); /* Yank the synqe out of the lctx synq. 
*/ INP_WLOCK(inp); TAILQ_REMOVE(&lctx->synq, synqe, link); release_synqe(synqe); /* removed from synq list */ inp = release_lctx(sc, lctx); if (inp) INP_WUNLOCK(inp); release_synqe(synqe); /* extra hold */ REJECT_PASS_ACCEPT(); } CTR6(KTR_CXGBE, "%s: stid %u, tid %u, lctx %p, synqe %p, SYNACK mode %d", __func__, stid, tid, lctx, synqe, ulp_mode); INP_WLOCK(inp); synqe->flags |= TPF_SYNQE_HAS_L2TE; if (__predict_false(inp->inp_flags & INP_DROPPED)) { /* * Listening socket closed but tod_listen_stop did not abort * this tid because there was no L2T entry for the tid at that * time. Abort it now. The reply to the abort will clean up. */ CTR6(KTR_CXGBE, "%s: stid %u, tid %u, lctx %p, synqe %p (0x%x), ABORT", __func__, stid, tid, lctx, synqe, synqe->flags); if (!(synqe->flags & TPF_SYNQE_EXPANDED)) send_reset_synqe(tod, synqe); INP_WUNLOCK(inp); CURVNET_RESTORE(); release_synqe(synqe); /* extra hold */ return (__LINE__); } INP_WUNLOCK(inp); CURVNET_RESTORE(); release_synqe(synqe); /* extra hold */ return (0); reject: CURVNET_RESTORE(); CTR4(KTR_CXGBE, "%s: stid %u, tid %u, REJECT (%d)", __func__, stid, tid, reject_reason); if (e) t4_l2t_release(e); release_tid(sc, tid, lctx->ctrlq); if (__predict_true(m != NULL)) { m_adj(m, sizeof(*cpl)); m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = 0xffff; hw_ifp->if_input(hw_ifp, m); } return (reject_reason); } static void synqe_to_protohdrs(struct adapter *sc, struct synq_entry *synqe, const struct cpl_pass_establish *cpl, struct in_conninfo *inc, struct tcphdr *th, struct tcpopt *to) { uint16_t tcp_opt = be16toh(cpl->tcp_opt); /* start off with the original SYN */ pass_accept_req_to_protohdrs(sc, synqe->syn, inc, th); /* modify parts to make it look like the ACK to our SYN|ACK */ th->th_flags = TH_ACK; th->th_ack = synqe->iss + 1; th->th_seq = be32toh(cpl->rcv_isn); bzero(to, sizeof(*to)); if (G_TCPOPT_TSTAMP(tcp_opt)) { to->to_flags |= TOF_TS; to->to_tsecr = synqe->ts; } } static int do_pass_establish(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; struct vi_info *vi; struct ifnet *ifp; const struct cpl_pass_establish *cpl = (const void *)(rss + 1); #if defined(KTR) || defined(INVARIANTS) unsigned int stid = G_PASS_OPEN_TID(be32toh(cpl->tos_stid)); #endif unsigned int tid = GET_TID(cpl); struct synq_entry *synqe = lookup_tid(sc, tid); struct listen_ctx *lctx = synqe->lctx; struct inpcb *inp = lctx->inp, *new_inp; struct socket *so; struct tcphdr th; struct tcpopt to; struct in_conninfo inc; struct toepcb *toep; u_int txqid, rxqid; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); #endif KASSERT(opcode == CPL_PASS_ESTABLISH, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); KASSERT(lctx->stid == stid, ("%s: lctx stid mismatch", __func__)); KASSERT(synqe->flags & TPF_SYNQE, ("%s: tid %u (ctx %p) not a synqe", __func__, tid, synqe)); CURVNET_SET(lctx->vnet); INP_INFO_RLOCK(&V_tcbinfo); /* for syncache_expand */ INP_WLOCK(inp); CTR6(KTR_CXGBE, "%s: stid %u, tid %u, synqe %p (0x%x), inp_flags 0x%x", __func__, stid, tid, synqe, synqe->flags, inp->inp_flags); if (__predict_false(inp->inp_flags & INP_DROPPED)) { if (synqe->flags & TPF_SYNQE_HAS_L2TE) { KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, ("%s: listen socket closed but tid %u not aborted.", __func__, tid)); } INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); return (0); } 
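	/*
	 * The listener is still alive.  Recover the ingress ifnet and the
	 * queue ids stashed in the SYN mbuf, and build a full toepcb for
	 * the now-established connection.
	 */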
ifp = synqe->syn->m_pkthdr.rcvif; vi = ifp->if_softc; KASSERT(vi->pi->adapter == sc, ("%s: vi %p, sc %p mismatch", __func__, vi, sc)); get_qids_from_mbuf(synqe->syn, &txqid, &rxqid); KASSERT(rxqid == iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0], ("%s: CPL arrived on unexpected rxq. %d %d", __func__, rxqid, (int)(iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0]))); toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT); if (toep == NULL) { reset: /* * The reply to this abort will perform final cleanup. There is * no need to check for HAS_L2TE here. We can be here only if * we responded to the PASS_ACCEPT_REQ, and our response had the * L2T idx. */ send_reset_synqe(TOEDEV(ifp), synqe); INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); return (0); } toep->tid = tid; toep->l2te = &sc->l2t->l2tab[synqe->l2e_idx]; if (synqe->flags & TPF_SYNQE_TCPDDP) set_ulp_mode(toep, ULP_MODE_TCPDDP); else if (synqe->flags & TPF_SYNQE_TLS) set_ulp_mode(toep, ULP_MODE_TLS); else set_ulp_mode(toep, ULP_MODE_NONE); /* opt0 rcv_bufsiz initially, assumes its normal meaning later */ toep->rx_credits = synqe->rcv_bufsize; so = inp->inp_socket; KASSERT(so != NULL, ("%s: socket is NULL", __func__)); /* Come up with something that syncache_expand should be ok with. */ synqe_to_protohdrs(sc, synqe, cpl, &inc, &th, &to); /* * No more need for anything in the mbuf that carried the * CPL_PASS_ACCEPT_REQ. Drop the CPL_PASS_ESTABLISH and toep pointer * there. XXX: bad form but I don't want to increase the size of synqe. */ m = synqe->syn; KASSERT(sizeof(*cpl) + sizeof(toep) <= m->m_len, ("%s: no room in mbuf %p (m_len %d)", __func__, m, m->m_len)); bcopy(cpl, mtod(m, void *), sizeof(*cpl)); *(struct toepcb **)(mtod(m, struct cpl_pass_establish *) + 1) = toep; if (!toe_syncache_expand(&inc, &to, &th, &so) || so == NULL) { free_toepcb(toep); goto reset; } /* New connection inpcb is already locked by syncache_expand(). */ new_inp = sotoinpcb(so); INP_WLOCK_ASSERT(new_inp); MPASS(so->so_vnet == lctx->vnet); toep->vnet = lctx->vnet; if (inc.inc_flags & INC_ISIPV6) toep->ce = t4_hold_lip(sc, &inc.inc6_laddr, lctx->ce); /* * This is for the unlikely case where the syncache entry that we added * has been evicted from the syncache, but the syncache_expand above * works because of syncookies. 
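 * (In that case TPF_SYNQE_EXPANDED was never set, so the block below must
 * fix up the keepalive timer itself and hand the socket to the TOE by
 * calling t4_offload_socket() directly, instead of relying on the
 * syncache's tod_offload_socket callback.)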
* * XXX: we've held the tcbinfo lock throughout so there's no risk of * anyone accept'ing a connection before we've installed our hooks, but * this somewhat defeats the purpose of having a tod_offload_socket :-( */ if (__predict_false(!(synqe->flags & TPF_SYNQE_EXPANDED))) { tcp_timer_activate(intotcpcb(new_inp), TT_KEEP, 0); t4_offload_socket(TOEDEV(ifp), synqe, so); } INP_WUNLOCK(new_inp); /* Done with the synqe */ TAILQ_REMOVE(&lctx->synq, synqe, link); inp = release_lctx(sc, lctx); if (inp != NULL) INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); release_synqe(synqe); return (0); } void t4_init_listen_cpl_handlers(void) { t4_register_cpl_handler(CPL_PASS_OPEN_RPL, do_pass_open_rpl); t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl); t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req); t4_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish); } void t4_uninit_listen_cpl_handlers(void) { t4_register_cpl_handler(CPL_PASS_OPEN_RPL, NULL); t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, NULL); t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, NULL); t4_register_cpl_handler(CPL_PASS_ESTABLISH, NULL); } #endif Index: stable/11/sys/dev/cxgbe/tom/t4_tom.c =================================================================== --- stable/11/sys/dev/cxgbe/tom/t4_tom.c (revision 346966) +++ stable/11/sys/dev/cxgbe/tom/t4_tom.c (revision 346967) @@ -1,1313 +1,1309 @@ /*- * Copyright (c) 2012 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TCPSTATES #include #include #include #include #ifdef TCP_OFFLOAD #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "common/t4_tcb.h" #include "t4_clip.h" #include "tom/t4_tom_l2t.h" #include "tom/t4_tom.h" #include "tom/t4_tls.h" static struct protosw toe_protosw; static struct pr_usrreqs toe_usrreqs; static struct protosw toe6_protosw; static struct pr_usrreqs toe6_usrreqs; /* Module ops */ static int t4_tom_mod_load(void); static int t4_tom_mod_unload(void); static int t4_tom_modevent(module_t, int, void *); /* ULD ops and helpers */ static int t4_tom_activate(struct adapter *); static int t4_tom_deactivate(struct adapter *); static struct uld_info tom_uld_info = { .uld_id = ULD_TOM, .activate = t4_tom_activate, .deactivate = t4_tom_deactivate, }; static void release_offload_resources(struct toepcb *); static int alloc_tid_tabs(struct tid_info *); static void free_tid_tabs(struct tid_info *); static void free_tom_data(struct adapter *, struct tom_data *); static void reclaim_wr_resources(void *, int); struct toepcb * alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct toepcb *toep; int tx_credits, txsd_total, len; /* * The firmware counts tx work request credits in units of 16 bytes * each. Reserve room for an ABORT_REQ so the driver never has to worry * about tx credits if it wants to abort a connection. */ tx_credits = sc->params.ofldq_wr_cred; tx_credits -= howmany(sizeof(struct cpl_abort_req), 16); /* * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte * immediate payload, and firmware counts tx work request credits in * units of 16 byte. Calculate the maximum work requests possible. 
*/ txsd_total = tx_credits / howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16); KASSERT(txqid >= vi->first_ofld_txq && txqid < vi->first_ofld_txq + vi->nofldtxq, ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi, vi->first_ofld_txq, vi->nofldtxq)); KASSERT(rxqid >= vi->first_ofld_rxq && rxqid < vi->first_ofld_rxq + vi->nofldrxq, ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi, vi->first_ofld_rxq, vi->nofldrxq)); len = offsetof(struct toepcb, txsd) + txsd_total * sizeof(struct ofld_tx_sdesc); toep = malloc(len, M_CXGBE, M_ZERO | flags); if (toep == NULL) return (NULL); refcount_init(&toep->refcount, 1); toep->td = sc->tom_softc; toep->vi = vi; toep->tc_idx = -1; toep->tx_total = tx_credits; toep->tx_credits = tx_credits; toep->ofld_txq = &sc->sge.ofld_txq[txqid]; toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid]; toep->ctrlq = &sc->sge.ctrlq[pi->port_id]; mbufq_init(&toep->ulp_pduq, INT_MAX); mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX); toep->txsd_total = txsd_total; toep->txsd_avail = txsd_total; toep->txsd_pidx = 0; toep->txsd_cidx = 0; aiotx_init_toep(toep); return (toep); } struct toepcb * hold_toepcb(struct toepcb *toep) { refcount_acquire(&toep->refcount); return (toep); } void free_toepcb(struct toepcb *toep) { if (refcount_release(&toep->refcount) == 0) return; KASSERT(!(toep->flags & TPF_ATTACHED), ("%s: attached to an inpcb", __func__)); KASSERT(!(toep->flags & TPF_CPL_PENDING), ("%s: CPL pending", __func__)); if (toep->ulp_mode == ULP_MODE_TCPDDP) ddp_uninit_toep(toep); tls_uninit_toep(toep); free(toep, M_CXGBE); } /* * Set up the socket for TCP offload. */ void offload_socket(struct socket *so, struct toepcb *toep) { struct tom_data *td = toep->td; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); struct sockbuf *sb; INP_WLOCK_ASSERT(inp); /* Update socket */ sb = &so->so_snd; SOCKBUF_LOCK(sb); sb->sb_flags |= SB_NOCOALESCE; SOCKBUF_UNLOCK(sb); sb = &so->so_rcv; SOCKBUF_LOCK(sb); sb->sb_flags |= SB_NOCOALESCE; if (inp->inp_vflag & INP_IPV6) so->so_proto = &toe6_protosw; else so->so_proto = &toe_protosw; SOCKBUF_UNLOCK(sb); /* Update TCP PCB */ tp->tod = &td->tod; tp->t_toe = toep; tp->t_flags |= TF_TOE; /* Install an extra hold on inp */ toep->inp = inp; toep->flags |= TPF_ATTACHED; in_pcbref(inp); /* Add the TOE PCB to the active list */ mtx_lock(&td->toep_list_lock); TAILQ_INSERT_HEAD(&td->toep_list, toep, link); mtx_unlock(&td->toep_list_lock); } /* This is _not_ the normal way to "unoffload" a socket. 
*/ void undo_offload_socket(struct socket *so) { struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); struct toepcb *toep = tp->t_toe; struct tom_data *td = toep->td; struct sockbuf *sb; INP_WLOCK_ASSERT(inp); sb = &so->so_snd; SOCKBUF_LOCK(sb); sb->sb_flags &= ~SB_NOCOALESCE; SOCKBUF_UNLOCK(sb); sb = &so->so_rcv; SOCKBUF_LOCK(sb); sb->sb_flags &= ~SB_NOCOALESCE; SOCKBUF_UNLOCK(sb); tp->tod = NULL; tp->t_toe = NULL; tp->t_flags &= ~TF_TOE; toep->inp = NULL; toep->flags &= ~TPF_ATTACHED; if (in_pcbrele_wlocked(inp)) panic("%s: inp freed.", __func__); mtx_lock(&td->toep_list_lock); TAILQ_REMOVE(&td->toep_list, toep, link); mtx_unlock(&td->toep_list_lock); } static void release_offload_resources(struct toepcb *toep) { struct tom_data *td = toep->td; struct adapter *sc = td_adapter(td); int tid = toep->tid; KASSERT(!(toep->flags & TPF_CPL_PENDING), ("%s: %p has CPL pending.", __func__, toep)); KASSERT(!(toep->flags & TPF_ATTACHED), ("%s: %p is still attached.", __func__, toep)); CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)", __func__, toep, tid, toep->l2te, toep->ce); /* * These queues should have been emptied at approximately the same time * that a normal connection's socket's so_snd would have been purged or * drained. Do _not_ clean up here. */ MPASS(mbufq_len(&toep->ulp_pduq) == 0); MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0); #ifdef INVARIANTS if (toep->ulp_mode == ULP_MODE_TCPDDP) ddp_assert_empty(toep); #endif if (toep->l2te) t4_l2t_release(toep->l2te); if (tid >= 0) { remove_tid(sc, tid, toep->ce ? 2 : 1); release_tid(sc, tid, toep->ctrlq); } if (toep->ce) t4_release_lip(sc, toep->ce); if (toep->tc_idx != -1) t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->tc_idx); mtx_lock(&td->toep_list_lock); TAILQ_REMOVE(&td->toep_list, toep, link); mtx_unlock(&td->toep_list_lock); free_toepcb(toep); } /* * The kernel is done with the TCP PCB and this is our opportunity to unhook the * toepcb hanging off of it. If the TOE driver is also done with the toepcb (no * pending CPL) then it is time to release all resources tied to the toepcb. * * Also gets called when an offloaded active open fails and the TOM wants the * kernel to take the TCP PCB back. */ static void t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp) { #if defined(KTR) || defined(INVARIANTS) struct inpcb *inp = tp->t_inpcb; #endif struct toepcb *toep = tp->t_toe; INP_WLOCK_ASSERT(inp); KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); KASSERT(toep->flags & TPF_ATTACHED, ("%s: not attached", __func__)); #ifdef KTR if (tp->t_state == TCPS_SYN_SENT) { CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)", __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags); } else { CTR6(KTR_CXGBE, "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)", toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp, inp->inp_flags); } #endif tp->t_toe = NULL; tp->t_flags &= ~TF_TOE; toep->flags &= ~TPF_ATTACHED; if (!(toep->flags & TPF_CPL_PENDING)) release_offload_resources(toep); } /* * setsockopt handler. */ static void t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name) { struct adapter *sc = tod->tod_softc; struct toepcb *toep = tp->t_toe; if (dir == SOPT_GET) return; CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name); switch (name) { case TCP_NODELAY: if (tp->t_state != TCPS_ESTABLISHED) break; t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 
0 : 1), 0, 0); break; default: break; } } static inline int get_tcb_bit(u_char *tcb, int bit) { int ix, shift; ix = 127 - (bit >> 3); shift = bit & 0x7; return ((tcb[ix] >> shift) & 1); } static inline uint64_t get_tcb_bits(u_char *tcb, int hi, int lo) { uint64_t rc = 0; while (hi >= lo) { rc = (rc << 1) | get_tcb_bit(tcb, hi); --hi; } return (rc); } /* * Called by the kernel to allow the TOE driver to "refine" values filled up in * the tcp_info for an offloaded connection. */ static void t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti) { int i, j, k, rc; struct adapter *sc = tod->tod_softc; struct toepcb *toep = tp->t_toe; uint32_t addr, v; uint32_t buf[TCB_SIZE / sizeof(uint32_t)]; u_char *tcb, tmp; INP_WLOCK_ASSERT(tp->t_inpcb); MPASS(ti != NULL); addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + toep->tid * TCB_SIZE; rc = read_via_memwin(sc, 2, addr, &buf[0], TCB_SIZE); if (rc != 0) return; tcb = (u_char *)&buf[0]; for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) { for (k = 0; k < 16; k++) { tmp = tcb[i + k]; tcb[i + k] = tcb[j + k]; tcb[j + k] = tmp; } } ti->tcpi_state = get_tcb_bits(tcb, 115, 112); v = get_tcb_bits(tcb, 271, 256); ti->tcpi_rtt = tcp_ticks_to_us(sc, v); v = get_tcb_bits(tcb, 287, 272); ti->tcpi_rttvar = tcp_ticks_to_us(sc, v); ti->tcpi_snd_ssthresh = get_tcb_bits(tcb, 487, 460); ti->tcpi_snd_cwnd = get_tcb_bits(tcb, 459, 432); ti->tcpi_rcv_nxt = get_tcb_bits(tcb, 553, 522); ti->tcpi_snd_nxt = get_tcb_bits(tcb, 319, 288) - get_tcb_bits(tcb, 375, 348); /* Receive window being advertised by us. */ ti->tcpi_rcv_space = get_tcb_bits(tcb, 581, 554); /* Send window ceiling. */ v = get_tcb_bits(tcb, 159, 144) << get_tcb_bits(tcb, 131, 128); ti->tcpi_snd_wnd = min(v, ti->tcpi_snd_cwnd); } /* * The TOE driver will not receive any more CPLs for the tid associated with the * toepcb; release the hold on the inpcb. */ void final_cpl_received(struct toepcb *toep) { struct inpcb *inp = toep->inp; KASSERT(inp != NULL, ("%s: inp is NULL", __func__)); INP_WLOCK_ASSERT(inp); KASSERT(toep->flags & TPF_CPL_PENDING, ("%s: CPL not pending already?", __func__)); CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)", __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags); if (toep->ulp_mode == ULP_MODE_TCPDDP) release_ddp_resources(toep); toep->inp = NULL; toep->flags &= ~TPF_CPL_PENDING; mbufq_drain(&toep->ulp_pdu_reclaimq); if (!(toep->flags & TPF_ATTACHED)) release_offload_resources(toep); if (!in_pcbrele_wlocked(inp)) INP_WUNLOCK(inp); } void insert_tid(struct adapter *sc, int tid, void *ctx, int ntids) { struct tid_info *t = &sc->tids; MPASS(tid >= t->tid_base); MPASS(tid - t->tid_base < t->ntids); t->tid_tab[tid - t->tid_base] = ctx; atomic_add_int(&t->tids_in_use, ntids); } void * lookup_tid(struct adapter *sc, int tid) { struct tid_info *t = &sc->tids; return (t->tid_tab[tid - t->tid_base]); } void update_tid(struct adapter *sc, int tid, void *ctx) { struct tid_info *t = &sc->tids; t->tid_tab[tid - t->tid_base] = ctx; } void remove_tid(struct adapter *sc, int tid, int ntids) { struct tid_info *t = &sc->tids; t->tid_tab[tid - t->tid_base] = NULL; atomic_subtract_int(&t->tids_in_use, ntids); } /* * What mtu_idx to use, given a 4-tuple. Note that both s->mss and tcp_mssopt * have the MSS that we should advertise in our SYN. Advertised MSS doesn't * account for any TCP options so the effective MSS (only payload, no headers or * options) could be different. We fill up tp->t_maxseg with the effective MSS * at the end of the 3-way handshake. 
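 *
 * Worked example (the table values are assumptions for illustration):
 * with mtus[] = { 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, ... }
 * and an advertised MSS of 1460 on IPv4, mtu = 1460 + 20 + 20 = 1500,
 * and the loop below stops at the last entry <= 1500, returning the
 * index of the 1500 entry itself.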
*/ int find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc, struct offload_settings *s) { unsigned short *mtus = &sc->params.mtus[0]; int i, mss, mtu; MPASS(inc != NULL); mss = s->mss > 0 ? s->mss : tcp_mssopt(inc); if (inc->inc_flags & INC_ISIPV6) mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr); else mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr); for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++) continue; return (i); } /* * Determine the receive window size for a socket. */ u_long select_rcv_wnd(struct socket *so) { unsigned long wnd; SOCKBUF_LOCK_ASSERT(&so->so_rcv); wnd = sbspace(&so->so_rcv); if (wnd < MIN_RCV_WND) wnd = MIN_RCV_WND; return min(wnd, MAX_RCV_WND); } int select_rcv_wscale(void) { int wscale = 0; unsigned long space = sb_max; if (space > MAX_RCV_WND) space = MAX_RCV_WND; while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space) wscale++; return (wscale); } /* * socket so could be a listening socket too. */ uint64_t calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e, int mtu_idx, int rscale, int rx_credits, int ulp_mode, struct offload_settings *s) { int keepalive; uint64_t opt0; MPASS(so != NULL); MPASS(vi != NULL); KASSERT(rx_credits <= M_RCV_BUFSIZ, ("%s: rcv_bufsiz too high", __func__)); opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) | V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits) | V_L2T_IDX(e->idx) | V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan); keepalive = tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE; opt0 |= V_KEEP_ALIVE(keepalive != 0); if (s->nagle < 0) { struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0); } else opt0 |= V_NAGLE(s->nagle != 0); return htobe64(opt0); } uint64_t select_ntuple(struct vi_info *vi, struct l2t_entry *e) { struct adapter *sc = vi->pi->adapter; struct tp_params *tp = &sc->params.tp; - uint16_t viid = vi->viid; uint64_t ntuple = 0; /* * Initialize each of the fields which we care about which are present * in the Compressed Filter Tuple. */ if (tp->vlan_shift >= 0 && e->vlan != CPL_L2T_VLAN_NONE) ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift; if (tp->port_shift >= 0) ntuple |= (uint64_t)e->lport << tp->port_shift; if (tp->protocol_shift >= 0) ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift; if (tp->vnic_shift >= 0 && tp->ingress_config & F_VNIC) { - uint32_t vf = G_FW_VIID_VIN(viid); - uint32_t pf = G_FW_VIID_PFN(viid); - uint32_t vld = G_FW_VIID_VIVLD(viid); - - ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) | - V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; + ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) | + V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) << + tp->vnic_shift; } if (is_t4(sc)) return (htobe32((uint32_t)ntuple)); else return (htobe64(V_FILTER_TUPLE(ntuple))); } static int is_tls_sock(struct socket *so, struct adapter *sc) { struct inpcb *inp = sotoinpcb(so); int i, rc; /* XXX: Eventually add a SO_WANT_TLS socket option perhaps? 
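 * Until then the heuristic is purely port-based: if, say,
 * sc->tt.tls_rx_ports = { 443 }, any connection whose local or foreign
 * port is 443 is treated as a TLS socket, and select_ulp_mode() below
 * picks ULP_MODE_TLS for it when can_tls_offload() is true.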
*/ rc = 0; ADAPTER_LOCK(sc); for (i = 0; i < sc->tt.num_tls_rx_ports; i++) { if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) || inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) { rc = 1; break; } } ADAPTER_UNLOCK(sc); return (rc); } int select_ulp_mode(struct socket *so, struct adapter *sc, struct offload_settings *s) { if (can_tls_offload(sc) && (s->tls > 0 || (s->tls < 0 && is_tls_sock(so, sc)))) return (ULP_MODE_TLS); else if (s->ddp > 0 || (s->ddp < 0 && sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0)) return (ULP_MODE_TCPDDP); else return (ULP_MODE_NONE); } void set_ulp_mode(struct toepcb *toep, int ulp_mode) { CTR4(KTR_CXGBE, "%s: toep %p (tid %d) ulp_mode %d", __func__, toep, toep->tid, ulp_mode); toep->ulp_mode = ulp_mode; tls_init_toep(toep); if (toep->ulp_mode == ULP_MODE_TCPDDP) ddp_init_toep(toep); } int negative_advice(int status) { return (status == CPL_ERR_RTX_NEG_ADVICE || status == CPL_ERR_PERSIST_NEG_ADVICE || status == CPL_ERR_KEEPALV_NEG_ADVICE); } static int alloc_tid_tab(struct tid_info *t, int flags) { MPASS(t->ntids > 0); MPASS(t->tid_tab == NULL); t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE, M_ZERO | flags); if (t->tid_tab == NULL) return (ENOMEM); atomic_store_rel_int(&t->tids_in_use, 0); return (0); } static void free_tid_tab(struct tid_info *t) { KASSERT(t->tids_in_use == 0, ("%s: %d tids still in use.", __func__, t->tids_in_use)); free(t->tid_tab, M_CXGBE); t->tid_tab = NULL; } static int alloc_stid_tab(struct tid_info *t, int flags) { MPASS(t->nstids > 0); MPASS(t->stid_tab == NULL); t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE, M_ZERO | flags); if (t->stid_tab == NULL) return (ENOMEM); mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF); t->stids_in_use = 0; TAILQ_INIT(&t->stids); t->nstids_free_head = t->nstids; return (0); } static void free_stid_tab(struct tid_info *t) { KASSERT(t->stids_in_use == 0, ("%s: %d tids still in use.", __func__, t->stids_in_use)); if (mtx_initialized(&t->stid_lock)) mtx_destroy(&t->stid_lock); free(t->stid_tab, M_CXGBE); t->stid_tab = NULL; } static void free_tid_tabs(struct tid_info *t) { free_tid_tab(t); free_atid_tab(t); free_stid_tab(t); } static int alloc_tid_tabs(struct tid_info *t) { int rc; rc = alloc_tid_tab(t, M_NOWAIT); if (rc != 0) goto failed; rc = alloc_atid_tab(t, M_NOWAIT); if (rc != 0) goto failed; rc = alloc_stid_tab(t, M_NOWAIT); if (rc != 0) goto failed; return (0); failed: free_tid_tabs(t); return (rc); } static void free_tom_data(struct adapter *sc, struct tom_data *td) { ASSERT_SYNCHRONIZED_OP(sc); KASSERT(TAILQ_EMPTY(&td->toep_list), ("%s: TOE PCB list is not empty.", __func__)); KASSERT(td->lctx_count == 0, ("%s: lctx hash table is not empty.", __func__)); t4_free_ppod_region(&td->pr); if (td->listen_mask != 0) hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask); if (mtx_initialized(&td->unsent_wr_lock)) mtx_destroy(&td->unsent_wr_lock); if (mtx_initialized(&td->lctx_hash_lock)) mtx_destroy(&td->lctx_hash_lock); if (mtx_initialized(&td->toep_list_lock)) mtx_destroy(&td->toep_list_lock); free_tid_tabs(&sc->tids); free(td, M_CXGBE); } static char * prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen, int *buflen) { char *pkt; struct tcphdr *th; int ipv6, len; const int maxlen = max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) + max(sizeof(struct ip), sizeof(struct ip6_hdr)) + sizeof(struct tcphdr); MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN); pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT); if (pkt 
== NULL) return (NULL); ipv6 = inp->inp_vflag & INP_IPV6; len = 0; if (vtag == 0xffff) { struct ether_header *eh = (void *)pkt; if (ipv6) eh->ether_type = htons(ETHERTYPE_IPV6); else eh->ether_type = htons(ETHERTYPE_IP); len += sizeof(*eh); } else { struct ether_vlan_header *evh = (void *)pkt; evh->evl_encap_proto = htons(ETHERTYPE_VLAN); evh->evl_tag = htons(vtag); if (ipv6) evh->evl_proto = htons(ETHERTYPE_IPV6); else evh->evl_proto = htons(ETHERTYPE_IP); len += sizeof(*evh); } if (ipv6) { struct ip6_hdr *ip6 = (void *)&pkt[len]; ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_plen = htons(sizeof(struct tcphdr)); ip6->ip6_nxt = IPPROTO_TCP; if (open_type == OPEN_TYPE_ACTIVE) { ip6->ip6_src = inp->in6p_laddr; ip6->ip6_dst = inp->in6p_faddr; } else if (open_type == OPEN_TYPE_LISTEN) { ip6->ip6_src = inp->in6p_laddr; ip6->ip6_dst = ip6->ip6_src; } len += sizeof(*ip6); } else { struct ip *ip = (void *)&pkt[len]; ip->ip_v = IPVERSION; ip->ip_hl = sizeof(*ip) >> 2; ip->ip_tos = inp->inp_ip_tos; ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr)); ip->ip_ttl = inp->inp_ip_ttl; ip->ip_p = IPPROTO_TCP; if (open_type == OPEN_TYPE_ACTIVE) { ip->ip_src = inp->inp_laddr; ip->ip_dst = inp->inp_faddr; } else if (open_type == OPEN_TYPE_LISTEN) { ip->ip_src = inp->inp_laddr; ip->ip_dst = ip->ip_src; } len += sizeof(*ip); } th = (void *)&pkt[len]; if (open_type == OPEN_TYPE_ACTIVE) { th->th_sport = inp->inp_lport; /* network byte order already */ th->th_dport = inp->inp_fport; /* ditto */ } else if (open_type == OPEN_TYPE_LISTEN) { th->th_sport = inp->inp_lport; /* network byte order already */ th->th_dport = th->th_sport; } len += sizeof(th); *pktlen = *buflen = len; return (pkt); } const struct offload_settings * lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m, uint16_t vtag, struct inpcb *inp) { const struct t4_offload_policy *op; char *pkt; struct offload_rule *r; int i, matched, pktlen, buflen; static const struct offload_settings allow_offloading_settings = { .offload = 1, .rx_coalesce = -1, .cong_algo = -1, .sched_class = -1, .tstamp = -1, .sack = -1, .nagle = -1, .ecn = -1, .ddp = -1, .tls = -1, .txq = -1, .rxq = -1, .mss = -1, }; static const struct offload_settings disallow_offloading_settings = { .offload = 0, /* rest is irrelevant when offload is off. */ }; rw_assert(&sc->policy_lock, RA_LOCKED); /* * If there's no Connection Offloading Policy attached to the device * then we need to return a default static policy. If * "cop_managed_offloading" is true, then we need to disallow * offloading until a COP is attached to the device. Otherwise we * allow offloading ... 
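 *
 * In tabular form:
 *
 *	COP attached	cop_managed_offloading	result
 *	no		false			allow, default settings
 *	no		true			disallow all offload
 *	yes		(either)		walk the rules below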
*/ op = sc->policy; if (op == NULL) { if (sc->tt.cop_managed_offloading) return (&disallow_offloading_settings); else return (&allow_offloading_settings); } switch (open_type) { case OPEN_TYPE_ACTIVE: case OPEN_TYPE_LISTEN: pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen); break; case OPEN_TYPE_PASSIVE: MPASS(m != NULL); pkt = mtod(m, char *); MPASS(*pkt == CPL_PASS_ACCEPT_REQ); pkt += sizeof(struct cpl_pass_accept_req); pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req); buflen = m->m_len - sizeof(struct cpl_pass_accept_req); break; default: MPASS(0); return (&disallow_offloading_settings); } if (pkt == NULL || pktlen == 0 || buflen == 0) return (&disallow_offloading_settings); matched = 0; r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { if (r->open_type != open_type && r->open_type != OPEN_TYPE_DONTCARE) { continue; } matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen); if (matched) break; } if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN) free(pkt, M_CXGBE); return (matched ? &r->settings : &disallow_offloading_settings); } static void reclaim_wr_resources(void *arg, int count) { struct tom_data *td = arg; STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list); struct cpl_act_open_req *cpl; u_int opcode, atid; struct wrqe *wr; struct adapter *sc; mtx_lock(&td->unsent_wr_lock); STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe); mtx_unlock(&td->unsent_wr_lock); while ((wr = STAILQ_FIRST(&twr_list)) != NULL) { STAILQ_REMOVE_HEAD(&twr_list, link); cpl = wrtod(wr); opcode = GET_OPCODE(cpl); switch (opcode) { case CPL_ACT_OPEN_REQ: case CPL_ACT_OPEN_REQ6: atid = G_TID_TID(be32toh(OPCODE_TID(cpl))); sc = td_adapter(td); CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid); act_open_failure_cleanup(sc, atid, EHOSTUNREACH); free(wr, M_CXGBE); break; default: log(LOG_ERR, "%s: leaked work request %p, wr_len %d, " "opcode %x\n", __func__, wr, wr->wr_len, opcode); /* WR not freed here; go look at it with a debugger. 
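 * Only CPL_ACT_OPEN_REQ/REQ6 are expected on this list, which holds WRs
 * whose L2 resolution failed (see the unsent WR list set up in
 * t4_tom_activate() below); any other opcode indicates a driver bug, so
 * the WR is deliberately kept around as evidence.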
*/ } } } /* * Ground control to Major TOM * Commencing countdown, engines on */ static int t4_tom_activate(struct adapter *sc) { struct tom_data *td; struct toedev *tod; struct vi_info *vi; int i, rc, v; ASSERT_SYNCHRONIZED_OP(sc); /* per-adapter softc for TOM */ td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT); if (td == NULL) return (ENOMEM); /* List of TOE PCBs and associated lock */ mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF); TAILQ_INIT(&td->toep_list); /* Listen context */ mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF); td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE, &td->listen_mask, HASH_NOWAIT); /* List of WRs for which L2 resolution failed */ mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF); STAILQ_INIT(&td->unsent_wr_list); TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td); /* TID tables */ rc = alloc_tid_tabs(&sc->tids); if (rc != 0) goto done; rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp, t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods"); if (rc != 0) goto done; t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK, V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask); /* toedev ops */ tod = &td->tod; init_toedev(tod); tod->tod_softc = sc; tod->tod_connect = t4_connect; tod->tod_listen_start = t4_listen_start; tod->tod_listen_stop = t4_listen_stop; tod->tod_rcvd = t4_rcvd; tod->tod_output = t4_tod_output; tod->tod_send_rst = t4_send_rst; tod->tod_send_fin = t4_send_fin; tod->tod_pcb_detach = t4_pcb_detach; tod->tod_l2_update = t4_l2_update; tod->tod_syncache_added = t4_syncache_added; tod->tod_syncache_removed = t4_syncache_removed; tod->tod_syncache_respond = t4_syncache_respond; tod->tod_offload_socket = t4_offload_socket; tod->tod_ctloutput = t4_ctloutput; #if 0 tod->tod_tcp_info = t4_tcp_info; #else (void)&t4_tcp_info; #endif for_each_port(sc, i) { for_each_vi(sc->port[i], v, vi) { TOEDEV(vi->ifp) = &td->tod; } } sc->tom_softc = td; register_toedev(sc->tom_softc); done: if (rc != 0) free_tom_data(sc, td); return (rc); } static int t4_tom_deactivate(struct adapter *sc) { int rc = 0; struct tom_data *td = sc->tom_softc; ASSERT_SYNCHRONIZED_OP(sc); if (td == NULL) return (0); /* XXX. KASSERT? */ if (sc->offload_map != 0) return (EBUSY); /* at least one port has IFCAP_TOE enabled */ if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI)) return (EBUSY); /* both iWARP and iSCSI rely on the TOE. 
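 * Deactivation can proceed only when nothing is live: no port has
 * IFCAP_TOE enabled, no dependent ULD is active, and (checked next) the
 * toep list is empty, no listen contexts remain, and the unsent-WR list
 * has been drained.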
*/ mtx_lock(&td->toep_list_lock); if (!TAILQ_EMPTY(&td->toep_list)) rc = EBUSY; mtx_unlock(&td->toep_list_lock); mtx_lock(&td->lctx_hash_lock); if (td->lctx_count > 0) rc = EBUSY; mtx_unlock(&td->lctx_hash_lock); taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources); mtx_lock(&td->unsent_wr_lock); if (!STAILQ_EMPTY(&td->unsent_wr_list)) rc = EBUSY; mtx_unlock(&td->unsent_wr_lock); if (rc == 0) { unregister_toedev(sc->tom_softc); free_tom_data(sc, td); sc->tom_softc = NULL; } return (rc); } static int t4_aio_queue_tom(struct socket *so, struct kaiocb *job) { struct tcpcb *tp = so_sototcpcb(so); struct toepcb *toep = tp->t_toe; int error; if (toep->ulp_mode == ULP_MODE_TCPDDP) { error = t4_aio_queue_ddp(so, job); if (error != EOPNOTSUPP) return (error); } return (t4_aio_queue_aiotx(so, job)); } static int t4_ctloutput_tom(struct socket *so, struct sockopt *sopt) { if (sopt->sopt_level != IPPROTO_TCP) return (tcp_ctloutput(so, sopt)); switch (sopt->sopt_name) { case TCP_TLSOM_SET_TLS_CONTEXT: case TCP_TLSOM_GET_TLS_TOM: case TCP_TLSOM_CLR_TLS_TOM: case TCP_TLSOM_CLR_QUIES: return (t4_ctloutput_tls(so, sopt)); default: return (tcp_ctloutput(so, sopt)); } } static int t4_tom_mod_load(void) { struct protosw *tcp_protosw, *tcp6_protosw; /* CPL handlers */ t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2, CPL_COOKIE_TOM); t4_init_connect_cpl_handlers(); t4_init_listen_cpl_handlers(); t4_init_cpl_io_handlers(); t4_ddp_mod_load(); t4_tls_mod_load(); tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM); if (tcp_protosw == NULL) return (ENOPROTOOPT); bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw)); bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs)); toe_usrreqs.pru_aio_queue = t4_aio_queue_tom; toe_protosw.pr_ctloutput = t4_ctloutput_tom; toe_protosw.pr_usrreqs = &toe_usrreqs; tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM); if (tcp6_protosw == NULL) return (ENOPROTOOPT); bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw)); bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs)); toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom; toe6_protosw.pr_ctloutput = t4_ctloutput_tom; toe6_protosw.pr_usrreqs = &toe6_usrreqs; return (t4_register_uld(&tom_uld_info)); } static void tom_uninit(struct adapter *sc, void *arg __unused) { if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun")) return; /* Try to free resources (works only if no port has IFCAP_TOE) */ if (uld_active(sc, ULD_TOM)) t4_deactivate_uld(sc, ULD_TOM); end_synchronized_op(sc, 0); } static int t4_tom_mod_unload(void) { t4_iterate(tom_uninit, NULL); if (t4_unregister_uld(&tom_uld_info) == EBUSY) return (EBUSY); t4_tls_mod_unload(); t4_ddp_mod_unload(); t4_uninit_connect_cpl_handlers(); t4_uninit_listen_cpl_handlers(); t4_uninit_cpl_io_handlers(); t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM); return (0); } #endif /* TCP_OFFLOAD */ static int t4_tom_modevent(module_t mod, int cmd, void *arg) { int rc = 0; #ifdef TCP_OFFLOAD switch (cmd) { case MOD_LOAD: rc = t4_tom_mod_load(); break; case MOD_UNLOAD: rc = t4_tom_mod_unload(); break; default: rc = EINVAL; } #else printf("t4_tom: compiled without TCP_OFFLOAD support.\n"); rc = EOPNOTSUPP; #endif return (rc); } static moduledata_t t4_tom_moddata= { "t4_tom", t4_tom_modevent, 0 }; MODULE_VERSION(t4_tom, 1); MODULE_DEPEND(t4_tom, toecore, 1, 1, 1); MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1); DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY); Index: stable/11 
===================================================================
--- stable/11	(revision 346966)
+++ stable/11	(revision 346967)

Property changes on: stable/11
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r345334
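/*
 * Appendix: a minimal, self-contained userspace sketch of the
 * get_tcb_bit()/get_tcb_bits() helpers used by t4_tcp_info() above, to
 * make the TCB bit numbering concrete.  TCB_SIZE matches the driver; the
 * field value planted below (0x4 in bits 115:112, the connection-state
 * field) is an arbitrary assumption for the demo.  Builds with a plain
 * cc invocation.
 */
#include <stdint.h>
#include <stdio.h>

#define TCB_SIZE	128		/* hardware TCB is 128 bytes */

static int
get_tcb_bit(unsigned char *tcb, int bit)
{
	/* Bit 0 is the LSB of the last byte of the (byte-reversed) TCB. */
	int ix = 127 - (bit >> 3);
	int shift = bit & 0x7;

	return ((tcb[ix] >> shift) & 1);
}

static uint64_t
get_tcb_bits(unsigned char *tcb, int hi, int lo)
{
	uint64_t rc = 0;

	/* Shift bits in from the high end down to the low end. */
	while (hi >= lo)
		rc = (rc << 1) | get_tcb_bit(tcb, hi--);
	return (rc);
}

int
main(void)
{
	unsigned char tcb[TCB_SIZE] = { 0 };

	/* Bits 112..115 live in the low nibble of byte 127 - (112 >> 3) = 113. */
	tcb[113] = 0x04;
	printf("state field = %llu\n",	/* prints 4 */
	    (unsigned long long)get_tcb_bits(tcb, 115, 112));
	return (0);
}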