diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h index 9cc923eaf2f6..53be2fa2588a 100644 --- a/sys/dev/cxgbe/common/common.h +++ b/sys/dev/cxgbe/common/common.h @@ -1,952 +1,951 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011 Chelsio Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ * */ #ifndef __CHELSIO_COMMON_H #define __CHELSIO_COMMON_H #include "t4_hw.h" enum { MAX_NPORTS = 4, /* max # of ports */ SERNUM_LEN = 24, /* Serial # length */ EC_LEN = 16, /* E/C length */ ID_LEN = 16, /* ID length */ PN_LEN = 16, /* Part Number length */ MD_LEN = 16, /* MFG diags version length */ MACADDR_LEN = 12, /* MAC Address length */ }; enum { T4_REGMAP_SIZE = (160 * 1024), T5_REGMAP_SIZE = (332 * 1024), }; enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST }; enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR }; enum { PAUSE_RX = 1 << 0, PAUSE_TX = 1 << 1, PAUSE_AUTONEG = 1 << 2 }; enum { /* * Real FECs. In the same order as the FEC portion of caps32 so that * the code can do (fec & M_FW_PORT_CAP32_FEC) to get all the real FECs. */ FEC_RS = 1 << 0, /* Reed-Solomon */ FEC_BASER_RS = 1 << 1, /* BASE-R, aka Firecode */ FEC_NONE = 1 << 2, /* no FEC */ /* * Pseudo FECs that translate to real FECs. The firmware knows nothing * about these and they start at M_FW_PORT_CAP32_FEC + 1. AUTO should * be set all by itself. */ FEC_AUTO = 1 << 5, FEC_MODULE = 1 << 6, /* FEC suggested by the cable/transceiver. 
*/ }; enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; struct port_stats { u64 tx_octets; /* total # of octets in good frames */ u64 tx_frames; /* all good frames */ u64 tx_bcast_frames; /* all broadcast frames */ u64 tx_mcast_frames; /* all multicast frames */ u64 tx_ucast_frames; /* all unicast frames */ u64 tx_error_frames; /* all error frames */ u64 tx_frames_64; /* # of Tx frames in a particular range */ u64 tx_frames_65_127; u64 tx_frames_128_255; u64 tx_frames_256_511; u64 tx_frames_512_1023; u64 tx_frames_1024_1518; u64 tx_frames_1519_max; u64 tx_drop; /* # of dropped Tx frames */ u64 tx_pause; /* # of transmitted pause frames */ u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ u64 rx_octets; /* total # of octets in good frames */ u64 rx_frames; /* all good frames */ u64 rx_bcast_frames; /* all broadcast frames */ u64 rx_mcast_frames; /* all multicast frames */ u64 rx_ucast_frames; /* all unicast frames */ u64 rx_too_long; /* # of frames exceeding MTU */ u64 rx_jabber; /* # of jabber frames */ u64 rx_fcs_err; /* # of received frames with bad FCS */ u64 rx_len_err; /* # of received frames with length error */ u64 rx_symbol_err; /* symbol errors */ u64 rx_runt; /* # of short frames */ u64 rx_frames_64; /* # of Rx frames in a particular range */ u64 rx_frames_65_127; u64 rx_frames_128_255; u64 rx_frames_256_511; u64 rx_frames_512_1023; u64 rx_frames_1024_1518; u64 rx_frames_1519_max; u64 rx_pause; /* # of received pause frames */ u64 rx_ppp0; /* # of received PPP prio 0 frames */ u64 rx_ppp1; /* # of received PPP prio 1 frames */ u64 rx_ppp2; /* # of received PPP 
prio 2 frames */ u64 rx_ppp3; /* # of received PPP prio 3 frames */ u64 rx_ppp4; /* # of received PPP prio 4 frames */ u64 rx_ppp5; /* # of received PPP prio 5 frames */ u64 rx_ppp6; /* # of received PPP prio 6 frames */ u64 rx_ppp7; /* # of received PPP prio 7 frames */ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ u64 rx_trunc0; /* buffer-group 0 truncated packets */ u64 rx_trunc1; /* buffer-group 1 truncated packets */ u64 rx_trunc2; /* buffer-group 2 truncated packets */ u64 rx_trunc3; /* buffer-group 3 truncated packets */ }; struct lb_port_stats { u64 octets; u64 frames; u64 bcast_frames; u64 mcast_frames; u64 ucast_frames; u64 error_frames; u64 frames_64; u64 frames_65_127; u64 frames_128_255; u64 frames_256_511; u64 frames_512_1023; u64 frames_1024_1518; u64 frames_1519_max; u64 drop; u64 ovflow0; u64 ovflow1; u64 ovflow2; u64 ovflow3; u64 trunc0; u64 trunc1; u64 trunc2; u64 trunc3; }; struct tp_tcp_stats { u32 tcp_out_rsts; u64 tcp_in_segs; u64 tcp_out_segs; u64 tcp_retrans_segs; }; struct tp_usm_stats { u32 frames; u32 drops; u64 octets; }; struct tp_tid_stats { u32 del; u32 inv; u32 act; u32 pas; }; struct tp_fcoe_stats { u32 frames_ddp; u32 frames_drop; u64 octets_ddp; }; struct tp_err_stats { u32 mac_in_errs[MAX_NCHAN]; u32 hdr_in_errs[MAX_NCHAN]; u32 tcp_in_errs[MAX_NCHAN]; u32 tnl_cong_drops[MAX_NCHAN]; u32 ofld_chan_drops[MAX_NCHAN]; u32 tnl_tx_drops[MAX_NCHAN]; u32 ofld_vlan_drops[MAX_NCHAN]; u32 tcp6_in_errs[MAX_NCHAN]; u32 ofld_no_neigh; u32 ofld_cong_defer; }; struct tp_tnl_stats { u32 out_pkt[MAX_NCHAN]; u32 in_pkt[MAX_NCHAN]; }; struct tp_proxy_stats { u32 proxy[MAX_NCHAN]; }; struct tp_cpl_stats { u32 req[MAX_NCHAN]; u32 rsp[MAX_NCHAN]; }; struct tp_rdma_stats { u32 rqe_dfr_pkt; u32 rqe_dfr_mod; }; struct sge_params { int timer_val[SGE_NTIMERS]; /* 
final, scaled values */ int counter_val[SGE_NCOUNTERS]; int fl_starve_threshold; int fl_starve_threshold2; int page_shift; int eq_s_qpp; int iq_s_qpp; int spg_len; int pad_boundary; int pack_boundary; int fl_pktshift; u32 sge_control; u32 sge_fl_buffer_size[SGE_FLBUF_SIZES]; }; struct tp_params { unsigned int tre; /* log2 of core clocks per TP tick */ unsigned int dack_re; /* DACK timer resolution */ unsigned int la_mask; /* what events are recorded by TP LA */ unsigned short tx_modq[MAX_NCHAN]; /* channel to modulation queue map */ - uint32_t vlan_pri_map; - uint32_t ingress_config; + uint16_t filter_mode; + uint16_t filter_mask; /* Used by TOE and hashfilters */ + int vnic_mode; uint32_t max_rx_pdu; uint32_t max_tx_pdu; - uint64_t hash_filter_mask; bool rx_pkt_encap; int8_t fcoe_shift; int8_t port_shift; int8_t vnic_shift; int8_t vlan_shift; int8_t tos_shift; int8_t protocol_shift; int8_t ethertype_shift; int8_t macmatch_shift; int8_t matchtype_shift; int8_t frag_shift; }; struct vpd_params { unsigned int cclk; u8 ec[EC_LEN + 1]; u8 sn[SERNUM_LEN + 1]; u8 id[ID_LEN + 1]; u8 pn[PN_LEN + 1]; u8 na[MACADDR_LEN + 1]; u8 md[MD_LEN + 1]; }; struct pci_params { unsigned int vpd_cap_addr; unsigned int mps; unsigned short speed; unsigned short width; }; /* * Firmware device log. */ struct devlog_params { u32 memtype; /* which memory (FW_MEMTYPE_* ) */ u32 start; /* start of log in firmware memory */ u32 size; /* size of log */ u32 addr; /* start address in flat addr space */ }; /* Stores chip specific parameters */ struct chip_params { u8 nchan; u8 pm_stats_cnt; u8 cng_ch_bits_log; /* congestion channel map bits width */ u8 nsched_cls; u8 cim_num_obq; u16 mps_rplc_size; u16 vfcount; u32 sge_fl_db; u16 mps_tcam_size; u16 rss_nentries; }; /* VF-only parameters. */ /* * Global Receive Side Scaling (RSS) parameters in host-native format. 
*/ struct rss_params { unsigned int mode; /* RSS mode */ union { struct { u_int synmapen:1; /* SYN Map Enable */ u_int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ u_int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ u_int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ u_int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ u_int ofdmapen:1; /* Offload Map Enable */ u_int tnlmapen:1; /* Tunnel Map Enable */ u_int tnlalllookup:1; /* Tunnel All Lookup */ u_int hashtoeplitz:1; /* use Toeplitz hash */ } basicvirtual; } u; }; /* * Maximum resources provisioned for a PCI VF. */ struct vf_resources { unsigned int nvi; /* N virtual interfaces */ unsigned int neq; /* N egress Qs */ unsigned int nethctrl; /* N egress ETH or CTRL Qs */ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ unsigned int niq; /* N ingress Qs */ unsigned int tc; /* PCI-E traffic class */ unsigned int pmask; /* port access rights mask */ unsigned int nexactf; /* N exact MPS filters */ unsigned int r_caps; /* read capabilities */ unsigned int wx_caps; /* write/execute capabilities */ }; struct adapter_params { struct sge_params sge; struct tp_params tp; /* PF-only */ struct vpd_params vpd; struct pci_params pci; struct devlog_params devlog; /* PF-only */ struct rss_params rss; /* VF-only */ struct vf_resources vfres; /* VF-only */ unsigned int core_vdd; unsigned int sf_size; /* serial flash size in bytes */ unsigned int sf_nsec; /* # of flash sectors */ unsigned int fw_vers; /* firmware version */ unsigned int bs_vers; /* bootstrap version */ unsigned int tp_vers; /* TP microcode version */ unsigned int er_vers; /* expansion ROM version */ unsigned int scfg_vers; /* Serial Configuration version */ unsigned int vpd_vers; /* VPD version */ unsigned short mtus[NMTUS]; unsigned short a_wnd[NCCTRL_WIN]; unsigned short b_wnd[NCCTRL_WIN]; unsigned int cim_la_size; uint8_t nports; /* # of ethernet ports */ uint8_t portvec; unsigned int chipid:4; /* chip ID. 
T4 = 4, T5 = 5, ... */ unsigned int rev:4; /* chip revision */ unsigned int fpga:1; /* this is an FPGA */ unsigned int offload:1; /* hw is TOE capable, fw has divvied up card resources for TOE operation. */ unsigned int bypass:1; /* this is a bypass card */ unsigned int ethoffload:1; unsigned int hash_filter:1; unsigned int filter2_wr_support:1; unsigned int port_caps32:1; unsigned int smac_add_support:1; unsigned int ofldq_wr_cred; unsigned int eo_wr_cred; unsigned int max_ordird_qp; unsigned int max_ird_adapter; uint32_t mps_bg_map; /* rx buffer group map for all ports (upto 4) */ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ bool viid_smt_extn_support; /* FW returns vin, vfvld & smt index? */ unsigned int max_pkts_per_eth_tx_pkts_wr; }; #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 #define CHELSIO_T6 0x6 /* * State needed to monitor the forward progress of SGE Ingress DMA activities * and possible hangs. */ struct sge_idma_monitor_state { unsigned int idma_1s_thresh; /* 1s threshold in Core Clock ticks */ unsigned int idma_stalled[2]; /* synthesized stalled timers in HZ */ unsigned int idma_state[2]; /* IDMA Hang detect state */ unsigned int idma_qid[2]; /* IDMA Hung Ingress Queue ID */ unsigned int idma_warn[2]; /* time to warning in HZ */ }; struct trace_params { u32 data[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4]; unsigned short snap_len; unsigned short min_len; unsigned char skip_ofst; unsigned char skip_len; unsigned char invert; unsigned char port; }; struct link_config { /* OS-specific code owns all the requested_* fields. 
*/ int8_t requested_aneg; /* link autonegotiation */ int8_t requested_fc; /* flow control */ int8_t requested_fec; /* FEC */ u_int requested_speed; /* speed (Mbps) */ uint32_t pcaps; /* link capabilities */ uint32_t acaps; /* advertised capabilities */ uint32_t lpacaps; /* peer advertised capabilities */ u_int speed; /* actual link speed (Mbps) */ int8_t fc; /* actual link flow control */ int8_t fec_hint; /* cable/transceiver recommended fec */ int8_t fec; /* actual FEC */ bool link_ok; /* link up? */ uint8_t link_down_rc; /* link down reason */ }; #include "adapter.h" #ifndef PCI_VENDOR_ID_CHELSIO # define PCI_VENDOR_ID_CHELSIO 0x1425 #endif #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; ++iter) static inline int is_ftid(const struct adapter *sc, u_int tid) { return (sc->tids.nftids > 0 && tid >= sc->tids.ftid_base && tid <= sc->tids.ftid_end); } static inline int is_hpftid(const struct adapter *sc, u_int tid) { return (sc->tids.nhpftids > 0 && tid >= sc->tids.hpftid_base && tid <= sc->tids.hpftid_end); } static inline int is_etid(const struct adapter *sc, u_int tid) { return (sc->tids.netids > 0 && tid >= sc->tids.etid_base && tid <= sc->tids.etid_end); } static inline int is_offload(const struct adapter *adap) { return adap->params.offload; } static inline int is_ethoffload(const struct adapter *adap) { return adap->params.ethoffload; } static inline int is_hashfilter(const struct adapter *adap) { return adap->params.hash_filter; } static inline int chip_id(struct adapter *adap) { return adap->params.chipid; } static inline int chip_rev(struct adapter *adap) { return adap->params.rev; } static inline int is_t4(struct adapter *adap) { return adap->params.chipid == CHELSIO_T4; } static inline int is_t5(struct adapter *adap) { return adap->params.chipid == CHELSIO_T5; } static inline int is_t6(struct adapter *adap) { return adap->params.chipid == CHELSIO_T6; } static inline int is_fpga(struct adapter *adap) { return 
adap->params.fpga; } static inline unsigned int core_ticks_per_usec(const struct adapter *adap) { return adap->params.vpd.cclk / 1000; } static inline unsigned int us_to_core_ticks(const struct adapter *adap, unsigned int us) { return (us * adap->params.vpd.cclk) / 1000; } static inline unsigned int core_ticks_to_us(const struct adapter *adapter, unsigned int ticks) { /* add Core Clock / 2 to round ticks to nearest uS */ return ((ticks * 1000 + adapter->params.vpd.cclk/2) / adapter->params.vpd.cclk); } static inline unsigned int dack_ticks_to_usec(const struct adapter *adap, unsigned int ticks) { return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap); } static inline u_int us_to_tcp_ticks(const struct adapter *adap, u_long us) { return (us * adap->params.vpd.cclk / 1000 >> adap->params.tp.tre); } static inline u_int tcp_ticks_to_us(const struct adapter *adap, u_int ticks) { return ((uint64_t)ticks << adap->params.tp.tre) / core_ticks_per_usec(adap); } void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, u32 val); int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, bool sleep_ok, int timeout); int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, bool sleep_ok); static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, int timeout) { return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true, timeout); } static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl) { return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); } static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl) { return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); } void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx); void t4_write_indirect(struct 
adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx); u32 t4_hw_pci_read_cfg4(adapter_t *adapter, int reg); struct fw_filter_wr; void t4_intr_enable(struct adapter *adapter); void t4_intr_disable(struct adapter *adapter); void t4_intr_clear(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter, bool verbose); int t4_hash_mac_addr(const u8 *addr); int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc); int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data); int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data); int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz); int t4_seeprom_wp(struct adapter *adapter, int enable); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n, const u8 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op); int t5_fw_init_extern_mem(struct adapter *adap); int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_load_boot(struct adapter *adap, u8 *boot_data, unsigned int boot_addr, unsigned int size); int t4_flash_erase_sectors(struct adapter *adapter, int start, int end); int t4_flash_cfg_addr(struct adapter *adapter); int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr); int t4_get_bs_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); int t4_get_exprom_version(struct adapter *adapter, 
u32 *vers); int t4_get_scfg_version(struct adapter *adapter, u32 *vers); int t4_get_vpd_version(struct adapter *adapter, u32 *vers); int t4_get_version_info(struct adapter *adapter); int t4_init_hw(struct adapter *adapter, u32 fw_params); const struct chip_params *t4_get_chip_params(int chipid); int t4_prep_adapter(struct adapter *adapter, u32 *buf); int t4_shutdown_adapter(struct adapter *adapter); int t4_init_devlog_params(struct adapter *adapter, int fw_attach); int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id); void t4_fatal_err(struct adapter *adapter, bool fw_error); int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, int filter_index, int enable); void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, int filter_index, int *enabled); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq); int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, unsigned int flags); int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, unsigned int flags, unsigned int defq, unsigned int skeyidx, unsigned int skey); int t4_read_rss(struct adapter *adapter, u16 *entries); void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok); void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx, bool sleep_ok); void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp, bool sleep_ok); void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val, bool sleep_ok); void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, u32 *vfl, u32 *vfh, bool sleep_ok); void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, u32 vfl, u32 vfh, bool 
sleep_ok); u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok); void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok); u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok); void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok); int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask); void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres); int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n); int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n); int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp); int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, const unsigned int *valp); int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp); int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr); void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr); void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp); int t4_get_flash_params(struct adapter *adapter); u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach); int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity); int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity); int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 size, __be32 *data); void t4_idma_monitor_init(struct adapter *adapter, struct sge_idma_monitor_state *idma); void t4_idma_monitor(struct adapter *adapter, struct sge_idma_monitor_state *idma, int hz, int ticks); int t4_set_vf_mac(struct adapter *adapter, unsigned int pf, unsigned int vf, unsigned int naddr, u8 
*addr); unsigned int t4_get_regs_len(struct adapter *adapter); void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size); const char *t4_get_port_type_description(enum fw_port_type port_type); void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); void t4_get_port_stats_offset(struct adapter *adap, int idx, struct port_stats *stats, struct port_stats *offset); void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); void t4_clr_port_stats(struct adapter *adap, int idx); void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]); void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]); void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, unsigned int *ipg, bool sleep_ok); void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val); void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr); void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st, bool sleep_ok); void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st, bool sleep_ok); void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st, bool sleep_ok); void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st, bool sleep_ok); void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st, bool sleep_ok); void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st, bool sleep_ok); void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st, bool sleep_ok); void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6, bool sleep_ok); void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, struct tp_fcoe_stats *st, bool sleep_ok); void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned 
short *beta); void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf); int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps); int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg); int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals, unsigned int start, unsigned int n); void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate); -int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map, - bool sleep_ok); +int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode); void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); void t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr); int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, u64 mask0, u64 mask1, unsigned int crc, bool enable); int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state); int t4_fw_bye(struct adapter *adap, unsigned int mbox); int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force); int t4_fw_restart(struct adapter *adap, unsigned int mbox); int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const u8 *fw_data, unsigned int size, int force); int t4_fw_initialize(struct adapter *adap, unsigned int mbox); int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val); int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val, int rw); int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val, int timeout); int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned 
int nparams, const u32 *params, const u32 *val); int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi, unsigned int cmask, unsigned int pmask, unsigned int exactf, unsigned int rcaps, unsigned int wxcaps); int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, u16 *rss_size, uint8_t *vfvld, uint16_t *vin, unsigned int portfunc, unsigned int idstype); int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, u16 *rss_size, uint8_t *vfvld, uint16_t *vin); int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int viid); int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok); int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int naddr, const u8 **addr, bool sleep_ok); int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx, bool sleep_ok); int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok); int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok); int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int vni, unsigned int vni_mask, u8 dip_hit, u8 lookup_type, bool sleep_ok); int t4_change_mac(struct adapter *adap, unsigned int 
mbox, unsigned int viid, int idx, const u8 *addr, bool persist, uint16_t *smt_idx); int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, const u8 *addr, bool smac); int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac); int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, bool ucast, u64 vec, bool sleep_ok); int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en); int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int nblinks); int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, unsigned int *valp); int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, unsigned int val); int t4_i2c_io(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf, bool write); int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf); int t4_i2c_wr(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf); int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id); int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id); int t4_eth_eq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned 
int vf, unsigned int eqid); int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, enum ctxt_type ctype, u32 *data); int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, u32 *data); int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type); const char *t4_link_down_rc_str(unsigned char link_down_rc); int t4_update_port_info(struct port_info *pi); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); int t4_sched_config(struct adapter *adapter, int type, int minmaxen, int sleep_ok); int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int cl, int minrate, int maxrate, int weight, int pktsize, int burstsize, int sleep_ok); int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode, unsigned int maxrate, int sleep_ok); int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl, int weight, int sleep_ok); int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl, int mode, unsigned int maxrate, int pktsize, int sleep_ok); int t4_config_watchdog(struct adapter *adapter, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int timeout, unsigned int action); int t4_get_devlog_level(struct adapter *adapter, unsigned int *level); int t4_set_devlog_level(struct adapter *adapter, unsigned int level); void t4_sge_decode_idma_state(struct adapter *adapter, int state); void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); 
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
    u32 start_index, bool sleep_ok);
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs,
    u32 start_index, bool sleep_ok);
int t4_configure_ringbb(struct adapter *adap);
int t4_configure_add_smac(struct adapter *adap);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
    u16 vlan);

/*
 * VF convenience wrappers.  Each forwards to the generic routine with
 * mbox/pf/vf arguments of 0 (a VF addresses only its own function).
 */
static inline int t4vf_query_params(struct adapter *adapter,
    unsigned int nparams, const u32 *params, u32 *vals)
{
	return t4_query_params(adapter, 0, 0, 0, nparams, params, vals);
}

static inline int t4vf_set_params(struct adapter *adapter,
    unsigned int nparams, const u32 *params, const u32 *vals)
{
	return t4_set_params(adapter, 0, 0, 0, nparams, params, vals);
}

/* Issue a mailbox command using the adapter's own mailbox index. */
static inline int t4vf_wr_mbox(struct adapter *adap, const void *cmd,
    int size, void *rpl)
{
	return t4_wr_mbox(adap, adap->mbox, cmd, size, rpl);
}

int t4vf_wait_dev_ready(struct adapter *adapter);
int t4vf_fw_reset(struct adapter *adapter);
int t4vf_get_sge_params(struct adapter *adapter);
int t4vf_get_rss_glb_config(struct adapter *adapter);
int t4vf_get_vfres(struct adapter *adapter);
int t4vf_prep_adapter(struct adapter *adapter);
int t4vf_get_vf_mac(struct adapter *adapter, unsigned int port,
    unsigned int *naddr, u8 *addr);
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
    enum t4_bar2_qtype qtype, int user, u64 *pbar2_qoffset,
    unsigned int *pbar2_qid);

unsigned int fwcap_to_speed(uint32_t caps);
uint32_t speed_to_fwcap(unsigned int speed);
uint32_t fwcap_top_speed(uint32_t caps);

/* Highest speed in the port's capability mask, in Gbps. */
static inline int port_top_speed(const struct port_info *pi)
{

	/* fwcap_to_speed() reports Mbps -> convert to Gbps. */
	return (fwcap_to_speed(pi->link_cfg.pcaps) / 1000);
}

#endif /* __CHELSIO_COMMON_H */
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index bbf58cae97f2..f4197a6a0b1b 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -1,11437 +1,11553 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 *
Copyright (c) 2012, 2016 Chelsio Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

/*
 * NOTE(review): the angle-bracket operands of several #include directives
 * below appear to have been lost in extraction (bare "#include" tokens);
 * verify against the pristine sys/dev/cxgbe/common/t4_hw.c.
 */
#include
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include
#include

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

/*
 * Millisecond sleep that is safe both before and after the scheduler is
 * running: busy-wait with DELAY() while cold, otherwise pause().
 */
#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		/* !! normalizes the masked field to 0/1 for the compare. */
		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

/* Same as t4_wait_op_done_val() but the completion-time value is discarded. */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		/* Select the register, then fetch its value. */
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 *
 * N.B. This routine should only be used as a last resort: the firmware uses
 *      the backdoor registers on a regular basis and we can end up
 *      conflicting with it's uses!
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
	u32 val;

	/* The enable bit moved on T6; T4 additionally needs LOCALCFG. */
	if (chip_id(adap) <= CHELSIO_T5)
		req |= F_ENABLE;
	else
		req |= F_T6_ENABLE;

	if (is_t4(adap))
		req |= F_LOCALCFG;

	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);

	/*
	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * F_ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);

	return val;
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	/* Indexed by G_PCIE_FW_EVAL(); order must match the hw encoding. */
	static const char *const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		adap->flags &= ~FW_OK;
		CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n",
		    reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw);
		/* 0xffffffff means the register itself read as all-ones. */
		if (pcie_fw != 0xffffffff)
			t4_os_dump_devlog(adap);
	}
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7, be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x), be32_to_cpu(asrt->u.assert.y));
}

/* Per-port snapshot of MPS tx-frame and rx-pause counters. */
struct port_tx_state {
	uint64_t rx_pause;
	uint64_t tx_frames;
};

/* Snapshot the tx-frames / rx-pause counters of one port. */
static void
read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
{
	uint32_t rx_pause_reg, tx_frames_reg;

	/* T4 and T5+ map the per-port MPS statistics at different offsets. */
	if (is_t4(sc)) {
		tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
		rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
	} else {
		tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
		rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
	}

	tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
	tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
}

/* Snapshot the counters of every port on the adapter. */
static void
read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
	int i;

	for_each_port(sc, i)
		read_tx_state_one(sc, i, &tx_state[i]);
}

/*
 * Compare each port's counters with the previous snapshot; if a port with tx
 * enabled received pause frames but transmitted nothing, bounce PORTTXEN to
 * unwedge it.  Updates the snapshot in place.
 */
static void
check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
	uint32_t port_ctl_reg;
	uint64_t tx_frames, rx_pause;
	int i;

	for_each_port(sc, i) {
		rx_pause = tx_state[i].rx_pause;
		tx_frames = tx_state[i].tx_frames;
		read_tx_state_one(sc, i, &tx_state[i]);	/* update */
		if (is_t4(sc))
			port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL);
		else
			port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL);
		if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
		    rx_pause != tx_state[i].rx_pause &&
		    tx_frames == tx_state[i].tx_frames) {
			/* Toggle tx off and back on to recover the port. */
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
			mdelay(1);
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN,
			    F_PORTTXEN);
		}
	}
}

/* Sentinel read when the PF has no access to the mailbox control register. */
#define X_CIM_PF_NOACCESS 0xeeeeeeee

/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given
 *	mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(negative implies @sleep_ok=false)
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *	Note that passing in a negative @timeout is an alternate mechanism
 *	for specifying @sleep_ok=false.  This is useful when a higher level
 *	interface allows for specification of @timeout but not @sleep_ok ...
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret, next_tx_check;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;
	struct port_tx_state tx_state[MAX_NPORTS];

	if (adap->flags & CHK_MBOX_ACCESS)
		ASSERT_SYNCHRONIZED_OP(adap);

	/* Commands must be a positive multiple of 16 bytes, <= MBOX_LEN. */
	if (size <= 0 || (size & 15) || size > MBOX_LEN)
		return -EINVAL;

	if (adap->flags & IS_VF) {
		/* VFs use fixed mailbox data addresses, not PF_REG(). */
		if (is_t6(adap))
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	memset(cmd_rpl, 0, sizeof(cmd_rpl));
	memcpy(cmd_rpl, cmd, size);
	CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
	for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++)
		t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	read_tx_state(adap, &tx_state[0]);	/* also flushes the write_reg */
	next_tx_check = 1000;
	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = 0;
	for (i = 0; i < timeout; i += ms) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}

		/* Every ~1s make sure the ports aren't pause-wedged. */
		if (i >= next_tx_check) {
			check_tx_state(adap, &tx_state[0]);
			next_tx_check = i + 1000;
		}

		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/* FW assertion arrived instead of a reply. */
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
	       *(const u8 *)cmd, mbox, pcie_fw);
	CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
	CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);

	if (pcie_fw & F_PCIE_FW_ERR) {
		ret = -ENXIO;
		t4_report_fw_error(adap);
	} else {
		ret = -ETIMEDOUT;
		t4_os_dump_devlog(adap);
	}

	/* A mailbox timeout is treated as fatal for the whole adapter. */
	t4_fatal_err(adap, true);
	return ret;
}

/* t4_wr_mbox_meat_timeout() with the default firmware command timeout. */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
				       FW_CMD_MAX_TIMEOUT);
}

/*
 * Log the EDC ECC error address and BIST status for EDC @idx.
 * T5+ only; no-op (with a warning) on T4 or for non-EDC indices.
 * Always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @parity is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* T4 has a single MC; T5+ select one of several via MC_REG(). */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Address is rounded down to the containing 64-byte line. */
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) | F_START_BIST |
		     V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @parity is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macro are missing in t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC
 *	into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boudaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the callers responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = rounddown2(addr, 64);
	end = roundup2(addr + len, 64);
	offset = (addr - start)/sizeof(__be32);
	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}
	return 0;
}

/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{
	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	return t4_hw_pci_read_cfg4(adap, reg);
}

/**
 *	t4_get_regs_len - return the size of the chips register set
 *	@adapter: the adapter
 *
 *	Returns the size of the chip's BAR0 register space.
*/ unsigned int t4_get_regs_len(struct adapter *adapter) { unsigned int chip_version = chip_id(adapter); switch (chip_version) { case CHELSIO_T4: if (adapter->flags & IS_VF) return FW_T4VF_REGMAP_SIZE; return T4_REGMAP_SIZE; case CHELSIO_T5: case CHELSIO_T6: if (adapter->flags & IS_VF) return FW_T4VF_REGMAP_SIZE; return T5_REGMAP_SIZE; } CH_ERR(adapter, "Unsupported chip version %d\n", chip_version); return 0; } /** * t4_get_regs - read chip registers into provided buffer * @adap: the adapter * @buf: register buffer * @buf_size: size (in bytes) of register buffer * * If the provided register buffer isn't large enough for the chip's * full register range, the register dump will be truncated to the * register buffer's size. */ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size) { static const unsigned int t4_reg_ranges[] = { 0x1008, 0x1108, 0x1180, 0x1184, 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, 0x11fc, 0x123c, 0x1300, 0x173c, 0x1800, 0x18fc, 0x3000, 0x30d8, 0x30e0, 0x30e4, 0x30ec, 0x5910, 0x5920, 0x5924, 0x5960, 0x5960, 0x5968, 0x5968, 0x5970, 0x5970, 0x5978, 0x5978, 0x5980, 0x5980, 0x5988, 0x5988, 0x5990, 0x5990, 0x5998, 0x5998, 0x59a0, 0x59d4, 0x5a00, 0x5ae0, 0x5ae8, 0x5ae8, 0x5af0, 0x5af0, 0x5af8, 0x5af8, 0x6000, 0x6098, 0x6100, 0x6150, 0x6200, 0x6208, 0x6240, 0x6248, 0x6280, 0x62b0, 0x62c0, 0x6338, 0x6370, 0x638c, 0x6400, 0x643c, 0x6500, 0x6524, 0x6a00, 0x6a04, 0x6a14, 0x6a38, 0x6a60, 0x6a70, 0x6a78, 0x6a78, 0x6b00, 0x6b0c, 0x6b1c, 0x6b84, 0x6bf0, 0x6bf8, 0x6c00, 0x6c0c, 0x6c1c, 0x6c84, 0x6cf0, 0x6cf8, 0x6d00, 0x6d0c, 0x6d1c, 0x6d84, 0x6df0, 0x6df8, 0x6e00, 0x6e0c, 0x6e1c, 0x6e84, 0x6ef0, 0x6ef8, 0x6f00, 0x6f0c, 0x6f1c, 0x6f84, 0x6ff0, 0x6ff8, 0x7000, 0x700c, 0x701c, 0x7084, 0x70f0, 0x70f8, 0x7100, 0x710c, 0x711c, 0x7184, 0x71f0, 0x71f8, 0x7200, 0x720c, 0x721c, 0x7284, 0x72f0, 0x72f8, 0x7300, 0x730c, 0x731c, 0x7384, 0x73f0, 0x73f8, 0x7400, 0x7450, 0x7500, 0x7530, 0x7600, 0x760c, 0x7614, 0x761c, 0x7680, 0x76cc, 0x7700, 0x7798, 0x77c0, 0x77fc, 
0x7900, 0x79fc, 0x7b00, 0x7b58, 0x7b60, 0x7b84, 0x7b8c, 0x7c38, 0x7d00, 0x7d38, 0x7d40, 0x7d80, 0x7d8c, 0x7ddc, 0x7de4, 0x7e04, 0x7e10, 0x7e1c, 0x7e24, 0x7e38, 0x7e40, 0x7e44, 0x7e4c, 0x7e78, 0x7e80, 0x7ea4, 0x7eac, 0x7edc, 0x7ee8, 0x7efc, 0x8dc0, 0x8e04, 0x8e10, 0x8e1c, 0x8e30, 0x8e78, 0x8ea0, 0x8eb8, 0x8ec0, 0x8f6c, 0x8fc0, 0x9008, 0x9010, 0x9058, 0x9060, 0x9060, 0x9068, 0x9074, 0x90fc, 0x90fc, 0x9400, 0x9408, 0x9410, 0x9458, 0x9600, 0x9600, 0x9608, 0x9638, 0x9640, 0x96bc, 0x9800, 0x9808, 0x9820, 0x983c, 0x9850, 0x9864, 0x9c00, 0x9c6c, 0x9c80, 0x9cec, 0x9d00, 0x9d6c, 0x9d80, 0x9dec, 0x9e00, 0x9e6c, 0x9e80, 0x9eec, 0x9f00, 0x9f6c, 0x9f80, 0x9fec, 0xd004, 0xd004, 0xd010, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0xea7c, 0xf000, 0x11110, 0x11118, 0x11190, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e4, 0x190f0, 0x190f8, 0x19100, 0x19110, 0x19120, 0x19124, 0x19150, 0x19194, 0x1919c, 0x191b0, 0x191d0, 0x191e8, 0x19238, 0x1924c, 0x193f8, 0x1943c, 0x1944c, 0x19474, 0x19490, 0x194e0, 0x194f0, 0x194f8, 0x19800, 0x19c08, 0x19c10, 0x19c90, 0x19ca0, 0x19ce4, 0x19cf0, 0x19d40, 0x19d50, 0x19d94, 0x19da0, 0x19de8, 0x19df0, 0x19e40, 0x19e50, 0x19e90, 0x19ea0, 0x19f4c, 0x1a000, 0x1a004, 0x1a010, 0x1a06c, 0x1a0b0, 0x1a0e4, 0x1a0ec, 0x1a0f4, 0x1a100, 0x1a108, 0x1a114, 0x1a120, 0x1a128, 0x1a130, 0x1a138, 0x1a138, 0x1a190, 0x1a1c4, 0x1a1fc, 0x1a1fc, 0x1e040, 0x1e04c, 0x1e284, 0x1e28c, 0x1e2c0, 0x1e2c0, 0x1e2e0, 0x1e2e0, 0x1e300, 0x1e384, 0x1e3c0, 0x1e3c8, 0x1e440, 0x1e44c, 0x1e684, 0x1e68c, 0x1e6c0, 0x1e6c0, 0x1e6e0, 0x1e6e0, 0x1e700, 0x1e784, 0x1e7c0, 0x1e7c8, 0x1e840, 0x1e84c, 0x1ea84, 0x1ea8c, 0x1eac0, 0x1eac0, 0x1eae0, 0x1eae0, 0x1eb00, 0x1eb84, 0x1ebc0, 0x1ebc8, 0x1ec40, 0x1ec4c, 0x1ee84, 0x1ee8c, 0x1eec0, 0x1eec0, 0x1eee0, 0x1eee0, 0x1ef00, 0x1ef84, 0x1efc0, 0x1efc8, 0x1f040, 0x1f04c, 0x1f284, 0x1f28c, 0x1f2c0, 0x1f2c0, 0x1f2e0, 0x1f2e0, 0x1f300, 0x1f384, 0x1f3c0, 0x1f3c8, 0x1f440, 0x1f44c, 0x1f684, 0x1f68c, 0x1f6c0, 0x1f6c0, 0x1f6e0, 0x1f6e0, 0x1f700, 0x1f784, 0x1f7c0, 0x1f7c8, 
0x1f840, 0x1f84c, 0x1fa84, 0x1fa8c, 0x1fac0, 0x1fac0, 0x1fae0, 0x1fae0, 0x1fb00, 0x1fb84, 0x1fbc0, 0x1fbc8, 0x1fc40, 0x1fc4c, 0x1fe84, 0x1fe8c, 0x1fec0, 0x1fec0, 0x1fee0, 0x1fee0, 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x20000, 0x2002c, 0x20100, 0x2013c, 0x20190, 0x201a0, 0x201a8, 0x201b8, 0x201c4, 0x201c8, 0x20200, 0x20318, 0x20400, 0x204b4, 0x204c0, 0x20528, 0x20540, 0x20614, 0x21000, 0x21040, 0x2104c, 0x21060, 0x210c0, 0x210ec, 0x21200, 0x21268, 0x21270, 0x21284, 0x212fc, 0x21388, 0x21400, 0x21404, 0x21500, 0x21500, 0x21510, 0x21518, 0x2152c, 0x21530, 0x2153c, 0x2153c, 0x21550, 0x21554, 0x21600, 0x21600, 0x21608, 0x2161c, 0x21624, 0x21628, 0x21630, 0x21634, 0x2163c, 0x2163c, 0x21700, 0x2171c, 0x21780, 0x2178c, 0x21800, 0x21818, 0x21820, 0x21828, 0x21830, 0x21848, 0x21850, 0x21854, 0x21860, 0x21868, 0x21870, 0x21870, 0x21878, 0x21898, 0x218a0, 0x218a8, 0x218b0, 0x218c8, 0x218d0, 0x218d4, 0x218e0, 0x218e8, 0x218f0, 0x218f0, 0x218f8, 0x21a18, 0x21a20, 0x21a28, 0x21a30, 0x21a48, 0x21a50, 0x21a54, 0x21a60, 0x21a68, 0x21a70, 0x21a70, 0x21a78, 0x21a98, 0x21aa0, 0x21aa8, 0x21ab0, 0x21ac8, 0x21ad0, 0x21ad4, 0x21ae0, 0x21ae8, 0x21af0, 0x21af0, 0x21af8, 0x21c18, 0x21c20, 0x21c20, 0x21c28, 0x21c30, 0x21c38, 0x21c38, 0x21c80, 0x21c98, 0x21ca0, 0x21ca8, 0x21cb0, 0x21cc8, 0x21cd0, 0x21cd4, 0x21ce0, 0x21ce8, 0x21cf0, 0x21cf0, 0x21cf8, 0x21d7c, 0x21e00, 0x21e04, 0x22000, 0x2202c, 0x22100, 0x2213c, 0x22190, 0x221a0, 0x221a8, 0x221b8, 0x221c4, 0x221c8, 0x22200, 0x22318, 0x22400, 0x224b4, 0x224c0, 0x22528, 0x22540, 0x22614, 0x23000, 0x23040, 0x2304c, 0x23060, 0x230c0, 0x230ec, 0x23200, 0x23268, 0x23270, 0x23284, 0x232fc, 0x23388, 0x23400, 0x23404, 0x23500, 0x23500, 0x23510, 0x23518, 0x2352c, 0x23530, 0x2353c, 0x2353c, 0x23550, 0x23554, 0x23600, 0x23600, 0x23608, 0x2361c, 0x23624, 0x23628, 0x23630, 0x23634, 0x2363c, 0x2363c, 0x23700, 0x2371c, 0x23780, 0x2378c, 0x23800, 0x23818, 0x23820, 0x23828, 0x23830, 0x23848, 0x23850, 0x23854, 0x23860, 0x23868, 0x23870, 0x23870, 0x23878, 0x23898, 
0x238a0, 0x238a8, 0x238b0, 0x238c8, 0x238d0, 0x238d4, 0x238e0, 0x238e8, 0x238f0, 0x238f0, 0x238f8, 0x23a18, 0x23a20, 0x23a28, 0x23a30, 0x23a48, 0x23a50, 0x23a54, 0x23a60, 0x23a68, 0x23a70, 0x23a70, 0x23a78, 0x23a98, 0x23aa0, 0x23aa8, 0x23ab0, 0x23ac8, 0x23ad0, 0x23ad4, 0x23ae0, 0x23ae8, 0x23af0, 0x23af0, 0x23af8, 0x23c18, 0x23c20, 0x23c20, 0x23c28, 0x23c30, 0x23c38, 0x23c38, 0x23c80, 0x23c98, 0x23ca0, 0x23ca8, 0x23cb0, 0x23cc8, 0x23cd0, 0x23cd4, 0x23ce0, 0x23ce8, 0x23cf0, 0x23cf0, 0x23cf8, 0x23d7c, 0x23e00, 0x23e04, 0x24000, 0x2402c, 0x24100, 0x2413c, 0x24190, 0x241a0, 0x241a8, 0x241b8, 0x241c4, 0x241c8, 0x24200, 0x24318, 0x24400, 0x244b4, 0x244c0, 0x24528, 0x24540, 0x24614, 0x25000, 0x25040, 0x2504c, 0x25060, 0x250c0, 0x250ec, 0x25200, 0x25268, 0x25270, 0x25284, 0x252fc, 0x25388, 0x25400, 0x25404, 0x25500, 0x25500, 0x25510, 0x25518, 0x2552c, 0x25530, 0x2553c, 0x2553c, 0x25550, 0x25554, 0x25600, 0x25600, 0x25608, 0x2561c, 0x25624, 0x25628, 0x25630, 0x25634, 0x2563c, 0x2563c, 0x25700, 0x2571c, 0x25780, 0x2578c, 0x25800, 0x25818, 0x25820, 0x25828, 0x25830, 0x25848, 0x25850, 0x25854, 0x25860, 0x25868, 0x25870, 0x25870, 0x25878, 0x25898, 0x258a0, 0x258a8, 0x258b0, 0x258c8, 0x258d0, 0x258d4, 0x258e0, 0x258e8, 0x258f0, 0x258f0, 0x258f8, 0x25a18, 0x25a20, 0x25a28, 0x25a30, 0x25a48, 0x25a50, 0x25a54, 0x25a60, 0x25a68, 0x25a70, 0x25a70, 0x25a78, 0x25a98, 0x25aa0, 0x25aa8, 0x25ab0, 0x25ac8, 0x25ad0, 0x25ad4, 0x25ae0, 0x25ae8, 0x25af0, 0x25af0, 0x25af8, 0x25c18, 0x25c20, 0x25c20, 0x25c28, 0x25c30, 0x25c38, 0x25c38, 0x25c80, 0x25c98, 0x25ca0, 0x25ca8, 0x25cb0, 0x25cc8, 0x25cd0, 0x25cd4, 0x25ce0, 0x25ce8, 0x25cf0, 0x25cf0, 0x25cf8, 0x25d7c, 0x25e00, 0x25e04, 0x26000, 0x2602c, 0x26100, 0x2613c, 0x26190, 0x261a0, 0x261a8, 0x261b8, 0x261c4, 0x261c8, 0x26200, 0x26318, 0x26400, 0x264b4, 0x264c0, 0x26528, 0x26540, 0x26614, 0x27000, 0x27040, 0x2704c, 0x27060, 0x270c0, 0x270ec, 0x27200, 0x27268, 0x27270, 0x27284, 0x272fc, 0x27388, 0x27400, 0x27404, 0x27500, 0x27500, 0x27510, 0x27518, 
0x2752c, 0x27530, 0x2753c, 0x2753c, 0x27550, 0x27554, 0x27600, 0x27600, 0x27608, 0x2761c, 0x27624, 0x27628, 0x27630, 0x27634, 0x2763c, 0x2763c, 0x27700, 0x2771c, 0x27780, 0x2778c, 0x27800, 0x27818, 0x27820, 0x27828, 0x27830, 0x27848, 0x27850, 0x27854, 0x27860, 0x27868, 0x27870, 0x27870, 0x27878, 0x27898, 0x278a0, 0x278a8, 0x278b0, 0x278c8, 0x278d0, 0x278d4, 0x278e0, 0x278e8, 0x278f0, 0x278f0, 0x278f8, 0x27a18, 0x27a20, 0x27a28, 0x27a30, 0x27a48, 0x27a50, 0x27a54, 0x27a60, 0x27a68, 0x27a70, 0x27a70, 0x27a78, 0x27a98, 0x27aa0, 0x27aa8, 0x27ab0, 0x27ac8, 0x27ad0, 0x27ad4, 0x27ae0, 0x27ae8, 0x27af0, 0x27af0, 0x27af8, 0x27c18, 0x27c20, 0x27c20, 0x27c28, 0x27c30, 0x27c38, 0x27c38, 0x27c80, 0x27c98, 0x27ca0, 0x27ca8, 0x27cb0, 0x27cc8, 0x27cd0, 0x27cd4, 0x27ce0, 0x27ce8, 0x27cf0, 0x27cf0, 0x27cf8, 0x27d7c, 0x27e00, 0x27e04, }; static const unsigned int t4vf_reg_ranges[] = { VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), VF_MPS_REG(A_MPS_VF_CTL), VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), FW_T4VF_MBDATA_BASE_ADDR, FW_T4VF_MBDATA_BASE_ADDR + ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), }; static const unsigned int t5_reg_ranges[] = { 0x1008, 0x10c0, 0x10cc, 0x10f8, 0x1100, 0x1100, 0x110c, 0x1148, 0x1180, 0x1184, 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, 0x11fc, 0x123c, 0x1280, 0x173c, 0x1800, 0x18fc, 0x3000, 0x3028, 0x3060, 0x30b0, 0x30b8, 0x30d8, 0x30e0, 0x30fc, 0x3140, 0x357c, 0x35a8, 0x35cc, 0x35ec, 0x35ec, 0x3600, 0x5624, 0x56cc, 0x56ec, 0x56f4, 0x5720, 0x5728, 0x575c, 0x580c, 0x5814, 0x5890, 0x589c, 0x58a4, 0x58ac, 0x58b8, 0x58bc, 0x5940, 0x59c8, 0x59d0, 0x59dc, 0x59fc, 0x5a18, 0x5a60, 0x5a70, 0x5a80, 0x5a9c, 0x5b94, 0x5bfc, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, 0x60a8, 0x614c, 0x7700, 0x7798, 0x77c0, 0x78fc, 0x7b00, 0x7b58, 0x7b60, 0x7b84, 0x7b8c, 0x7c54, 0x7d00, 0x7d38, 0x7d40, 0x7d80, 0x7d8c, 0x7ddc, 
0x7de4, 0x7e04, 0x7e10, 0x7e1c, 0x7e24, 0x7e38, 0x7e40, 0x7e44, 0x7e4c, 0x7e78, 0x7e80, 0x7edc, 0x7ee8, 0x7efc, 0x8dc0, 0x8de0, 0x8df8, 0x8e04, 0x8e10, 0x8e84, 0x8ea0, 0x8f84, 0x8fc0, 0x9058, 0x9060, 0x9060, 0x9068, 0x90f8, 0x9400, 0x9408, 0x9410, 0x9470, 0x9600, 0x9600, 0x9608, 0x9638, 0x9640, 0x96f4, 0x9800, 0x9808, 0x9810, 0x9864, 0x9c00, 0x9c6c, 0x9c80, 0x9cec, 0x9d00, 0x9d6c, 0x9d80, 0x9dec, 0x9e00, 0x9e6c, 0x9e80, 0x9eec, 0x9f00, 0x9f6c, 0x9f80, 0xa020, 0xd000, 0xd004, 0xd010, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0x1106c, 0x11074, 0x11088, 0x1109c, 0x1117c, 0x11190, 0x11204, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e8, 0x190f0, 0x190f8, 0x19100, 0x19110, 0x19120, 0x19124, 0x19150, 0x19194, 0x1919c, 0x191b0, 0x191d0, 0x191e8, 0x19238, 0x19290, 0x193f8, 0x19428, 0x19430, 0x19444, 0x1944c, 0x1946c, 0x19474, 0x19474, 0x19490, 0x194cc, 0x194f0, 0x194f8, 0x19c00, 0x19c08, 0x19c10, 0x19c60, 0x19c94, 0x19ce4, 0x19cf0, 0x19d40, 0x19d50, 0x19d94, 0x19da0, 0x19de8, 0x19df0, 0x19e10, 0x19e50, 0x19e90, 0x19ea0, 0x19f24, 0x19f34, 0x19f34, 0x19f40, 0x19f50, 0x19f90, 0x19fb4, 0x19fc4, 0x19fe4, 0x1a000, 0x1a004, 0x1a010, 0x1a06c, 0x1a0b0, 0x1a0e4, 0x1a0ec, 0x1a0f8, 0x1a100, 0x1a108, 0x1a114, 0x1a130, 0x1a138, 0x1a1c4, 0x1a1fc, 0x1a1fc, 0x1e008, 0x1e00c, 0x1e040, 0x1e044, 0x1e04c, 0x1e04c, 0x1e284, 0x1e290, 0x1e2c0, 0x1e2c0, 0x1e2e0, 0x1e2e0, 0x1e300, 0x1e384, 0x1e3c0, 0x1e3c8, 0x1e408, 0x1e40c, 0x1e440, 0x1e444, 0x1e44c, 0x1e44c, 0x1e684, 0x1e690, 0x1e6c0, 0x1e6c0, 0x1e6e0, 0x1e6e0, 0x1e700, 0x1e784, 0x1e7c0, 0x1e7c8, 0x1e808, 0x1e80c, 0x1e840, 0x1e844, 0x1e84c, 0x1e84c, 0x1ea84, 0x1ea90, 0x1eac0, 0x1eac0, 0x1eae0, 0x1eae0, 0x1eb00, 0x1eb84, 0x1ebc0, 0x1ebc8, 0x1ec08, 0x1ec0c, 0x1ec40, 0x1ec44, 0x1ec4c, 0x1ec4c, 0x1ee84, 0x1ee90, 0x1eec0, 0x1eec0, 0x1eee0, 0x1eee0, 0x1ef00, 0x1ef84, 0x1efc0, 0x1efc8, 0x1f008, 0x1f00c, 0x1f040, 0x1f044, 0x1f04c, 0x1f04c, 0x1f284, 0x1f290, 0x1f2c0, 0x1f2c0, 0x1f2e0, 0x1f2e0, 0x1f300, 0x1f384, 0x1f3c0, 0x1f3c8, 0x1f408, 0x1f40c, 0x1f440, 
0x1f444, 0x1f44c, 0x1f44c, 0x1f684, 0x1f690, 0x1f6c0, 0x1f6c0, 0x1f6e0, 0x1f6e0, 0x1f700, 0x1f784, 0x1f7c0, 0x1f7c8, 0x1f808, 0x1f80c, 0x1f840, 0x1f844, 0x1f84c, 0x1f84c, 0x1fa84, 0x1fa90, 0x1fac0, 0x1fac0, 0x1fae0, 0x1fae0, 0x1fb00, 0x1fb84, 0x1fbc0, 0x1fbc8, 0x1fc08, 0x1fc0c, 0x1fc40, 0x1fc44, 0x1fc4c, 0x1fc4c, 0x1fe84, 0x1fe90, 0x1fec0, 0x1fec0, 0x1fee0, 0x1fee0, 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, 0x30100, 0x30144, 0x30190, 0x301a0, 0x301a8, 0x301b8, 0x301c4, 0x301c8, 0x301d0, 0x301d0, 0x30200, 0x30318, 0x30400, 0x304b4, 0x304c0, 0x3052c, 0x30540, 0x3061c, 0x30800, 0x30828, 0x30834, 0x30834, 0x308c0, 0x30908, 0x30910, 0x309ac, 0x30a00, 0x30a14, 0x30a1c, 0x30a2c, 0x30a44, 0x30a50, 0x30a74, 0x30a74, 0x30a7c, 0x30afc, 0x30b08, 0x30c24, 0x30d00, 0x30d00, 0x30d08, 0x30d14, 0x30d1c, 0x30d20, 0x30d3c, 0x30d3c, 0x30d48, 0x30d50, 0x31200, 0x3120c, 0x31220, 0x31220, 0x31240, 0x31240, 0x31600, 0x3160c, 0x31a00, 0x31a1c, 0x31e00, 0x31e20, 0x31e38, 0x31e3c, 0x31e80, 0x31e80, 0x31e88, 0x31ea8, 0x31eb0, 0x31eb4, 0x31ec8, 0x31ed4, 0x31fb8, 0x32004, 0x32200, 0x32200, 0x32208, 0x32240, 0x32248, 0x32280, 0x32288, 0x322c0, 0x322c8, 0x322fc, 0x32600, 0x32630, 0x32a00, 0x32abc, 0x32b00, 0x32b10, 0x32b20, 0x32b30, 0x32b40, 0x32b50, 0x32b60, 0x32b70, 0x33000, 0x33028, 0x33030, 0x33048, 0x33060, 0x33068, 0x33070, 0x3309c, 0x330f0, 0x33128, 0x33130, 0x33148, 0x33160, 0x33168, 0x33170, 0x3319c, 0x331f0, 0x33238, 0x33240, 0x33240, 0x33248, 0x33250, 0x3325c, 0x33264, 0x33270, 0x332b8, 0x332c0, 0x332e4, 0x332f8, 0x33338, 0x33340, 0x33340, 0x33348, 0x33350, 0x3335c, 0x33364, 0x33370, 0x333b8, 0x333c0, 0x333e4, 0x333f8, 0x33428, 0x33430, 0x33448, 0x33460, 0x33468, 0x33470, 0x3349c, 0x334f0, 0x33528, 0x33530, 0x33548, 0x33560, 0x33568, 0x33570, 0x3359c, 0x335f0, 0x33638, 0x33640, 0x33640, 0x33648, 0x33650, 0x3365c, 0x33664, 0x33670, 0x336b8, 0x336c0, 0x336e4, 0x336f8, 0x33738, 0x33740, 0x33740, 0x33748, 0x33750, 0x3375c, 0x33764, 0x33770, 0x337b8, 0x337c0, 0x337e4, 0x337f8, 
0x337fc, 0x33814, 0x33814, 0x3382c, 0x3382c, 0x33880, 0x3388c, 0x338e8, 0x338ec, 0x33900, 0x33928, 0x33930, 0x33948, 0x33960, 0x33968, 0x33970, 0x3399c, 0x339f0, 0x33a38, 0x33a40, 0x33a40, 0x33a48, 0x33a50, 0x33a5c, 0x33a64, 0x33a70, 0x33ab8, 0x33ac0, 0x33ae4, 0x33af8, 0x33b10, 0x33b28, 0x33b28, 0x33b3c, 0x33b50, 0x33bf0, 0x33c10, 0x33c28, 0x33c28, 0x33c3c, 0x33c50, 0x33cf0, 0x33cfc, 0x34000, 0x34030, 0x34100, 0x34144, 0x34190, 0x341a0, 0x341a8, 0x341b8, 0x341c4, 0x341c8, 0x341d0, 0x341d0, 0x34200, 0x34318, 0x34400, 0x344b4, 0x344c0, 0x3452c, 0x34540, 0x3461c, 0x34800, 0x34828, 0x34834, 0x34834, 0x348c0, 0x34908, 0x34910, 0x349ac, 0x34a00, 0x34a14, 0x34a1c, 0x34a2c, 0x34a44, 0x34a50, 0x34a74, 0x34a74, 0x34a7c, 0x34afc, 0x34b08, 0x34c24, 0x34d00, 0x34d00, 0x34d08, 0x34d14, 0x34d1c, 0x34d20, 0x34d3c, 0x34d3c, 0x34d48, 0x34d50, 0x35200, 0x3520c, 0x35220, 0x35220, 0x35240, 0x35240, 0x35600, 0x3560c, 0x35a00, 0x35a1c, 0x35e00, 0x35e20, 0x35e38, 0x35e3c, 0x35e80, 0x35e80, 0x35e88, 0x35ea8, 0x35eb0, 0x35eb4, 0x35ec8, 0x35ed4, 0x35fb8, 0x36004, 0x36200, 0x36200, 0x36208, 0x36240, 0x36248, 0x36280, 0x36288, 0x362c0, 0x362c8, 0x362fc, 0x36600, 0x36630, 0x36a00, 0x36abc, 0x36b00, 0x36b10, 0x36b20, 0x36b30, 0x36b40, 0x36b50, 0x36b60, 0x36b70, 0x37000, 0x37028, 0x37030, 0x37048, 0x37060, 0x37068, 0x37070, 0x3709c, 0x370f0, 0x37128, 0x37130, 0x37148, 0x37160, 0x37168, 0x37170, 0x3719c, 0x371f0, 0x37238, 0x37240, 0x37240, 0x37248, 0x37250, 0x3725c, 0x37264, 0x37270, 0x372b8, 0x372c0, 0x372e4, 0x372f8, 0x37338, 0x37340, 0x37340, 0x37348, 0x37350, 0x3735c, 0x37364, 0x37370, 0x373b8, 0x373c0, 0x373e4, 0x373f8, 0x37428, 0x37430, 0x37448, 0x37460, 0x37468, 0x37470, 0x3749c, 0x374f0, 0x37528, 0x37530, 0x37548, 0x37560, 0x37568, 0x37570, 0x3759c, 0x375f0, 0x37638, 0x37640, 0x37640, 0x37648, 0x37650, 0x3765c, 0x37664, 0x37670, 0x376b8, 0x376c0, 0x376e4, 0x376f8, 0x37738, 0x37740, 0x37740, 0x37748, 0x37750, 0x3775c, 0x37764, 0x37770, 0x377b8, 0x377c0, 0x377e4, 0x377f8, 0x377fc, 0x37814, 
0x37814, 0x3782c, 0x3782c, 0x37880, 0x3788c, 0x378e8, 0x378ec, 0x37900, 0x37928, 0x37930, 0x37948, 0x37960, 0x37968, 0x37970, 0x3799c, 0x379f0, 0x37a38, 0x37a40, 0x37a40, 0x37a48, 0x37a50, 0x37a5c, 0x37a64, 0x37a70, 0x37ab8, 0x37ac0, 0x37ae4, 0x37af8, 0x37b10, 0x37b28, 0x37b28, 0x37b3c, 0x37b50, 0x37bf0, 0x37c10, 0x37c28, 0x37c28, 0x37c3c, 0x37c50, 0x37cf0, 0x37cfc, 0x38000, 0x38030, 0x38100, 0x38144, 0x38190, 0x381a0, 0x381a8, 0x381b8, 0x381c4, 0x381c8, 0x381d0, 0x381d0, 0x38200, 0x38318, 0x38400, 0x384b4, 0x384c0, 0x3852c, 0x38540, 0x3861c, 0x38800, 0x38828, 0x38834, 0x38834, 0x388c0, 0x38908, 0x38910, 0x389ac, 0x38a00, 0x38a14, 0x38a1c, 0x38a2c, 0x38a44, 0x38a50, 0x38a74, 0x38a74, 0x38a7c, 0x38afc, 0x38b08, 0x38c24, 0x38d00, 0x38d00, 0x38d08, 0x38d14, 0x38d1c, 0x38d20, 0x38d3c, 0x38d3c, 0x38d48, 0x38d50, 0x39200, 0x3920c, 0x39220, 0x39220, 0x39240, 0x39240, 0x39600, 0x3960c, 0x39a00, 0x39a1c, 0x39e00, 0x39e20, 0x39e38, 0x39e3c, 0x39e80, 0x39e80, 0x39e88, 0x39ea8, 0x39eb0, 0x39eb4, 0x39ec8, 0x39ed4, 0x39fb8, 0x3a004, 0x3a200, 0x3a200, 0x3a208, 0x3a240, 0x3a248, 0x3a280, 0x3a288, 0x3a2c0, 0x3a2c8, 0x3a2fc, 0x3a600, 0x3a630, 0x3aa00, 0x3aabc, 0x3ab00, 0x3ab10, 0x3ab20, 0x3ab30, 0x3ab40, 0x3ab50, 0x3ab60, 0x3ab70, 0x3b000, 0x3b028, 0x3b030, 0x3b048, 0x3b060, 0x3b068, 0x3b070, 0x3b09c, 0x3b0f0, 0x3b128, 0x3b130, 0x3b148, 0x3b160, 0x3b168, 0x3b170, 0x3b19c, 0x3b1f0, 0x3b238, 0x3b240, 0x3b240, 0x3b248, 0x3b250, 0x3b25c, 0x3b264, 0x3b270, 0x3b2b8, 0x3b2c0, 0x3b2e4, 0x3b2f8, 0x3b338, 0x3b340, 0x3b340, 0x3b348, 0x3b350, 0x3b35c, 0x3b364, 0x3b370, 0x3b3b8, 0x3b3c0, 0x3b3e4, 0x3b3f8, 0x3b428, 0x3b430, 0x3b448, 0x3b460, 0x3b468, 0x3b470, 0x3b49c, 0x3b4f0, 0x3b528, 0x3b530, 0x3b548, 0x3b560, 0x3b568, 0x3b570, 0x3b59c, 0x3b5f0, 0x3b638, 0x3b640, 0x3b640, 0x3b648, 0x3b650, 0x3b65c, 0x3b664, 0x3b670, 0x3b6b8, 0x3b6c0, 0x3b6e4, 0x3b6f8, 0x3b738, 0x3b740, 0x3b740, 0x3b748, 0x3b750, 0x3b75c, 0x3b764, 0x3b770, 0x3b7b8, 0x3b7c0, 0x3b7e4, 0x3b7f8, 0x3b7fc, 0x3b814, 0x3b814, 0x3b82c, 
0x3b82c, 0x3b880, 0x3b88c, 0x3b8e8, 0x3b8ec, 0x3b900, 0x3b928, 0x3b930, 0x3b948, 0x3b960, 0x3b968, 0x3b970, 0x3b99c, 0x3b9f0, 0x3ba38, 0x3ba40, 0x3ba40, 0x3ba48, 0x3ba50, 0x3ba5c, 0x3ba64, 0x3ba70, 0x3bab8, 0x3bac0, 0x3bae4, 0x3baf8, 0x3bb10, 0x3bb28, 0x3bb28, 0x3bb3c, 0x3bb50, 0x3bbf0, 0x3bc10, 0x3bc28, 0x3bc28, 0x3bc3c, 0x3bc50, 0x3bcf0, 0x3bcfc, 0x3c000, 0x3c030, 0x3c100, 0x3c144, 0x3c190, 0x3c1a0, 0x3c1a8, 0x3c1b8, 0x3c1c4, 0x3c1c8, 0x3c1d0, 0x3c1d0, 0x3c200, 0x3c318, 0x3c400, 0x3c4b4, 0x3c4c0, 0x3c52c, 0x3c540, 0x3c61c, 0x3c800, 0x3c828, 0x3c834, 0x3c834, 0x3c8c0, 0x3c908, 0x3c910, 0x3c9ac, 0x3ca00, 0x3ca14, 0x3ca1c, 0x3ca2c, 0x3ca44, 0x3ca50, 0x3ca74, 0x3ca74, 0x3ca7c, 0x3cafc, 0x3cb08, 0x3cc24, 0x3cd00, 0x3cd00, 0x3cd08, 0x3cd14, 0x3cd1c, 0x3cd20, 0x3cd3c, 0x3cd3c, 0x3cd48, 0x3cd50, 0x3d200, 0x3d20c, 0x3d220, 0x3d220, 0x3d240, 0x3d240, 0x3d600, 0x3d60c, 0x3da00, 0x3da1c, 0x3de00, 0x3de20, 0x3de38, 0x3de3c, 0x3de80, 0x3de80, 0x3de88, 0x3dea8, 0x3deb0, 0x3deb4, 0x3dec8, 0x3ded4, 0x3dfb8, 0x3e004, 0x3e200, 0x3e200, 0x3e208, 0x3e240, 0x3e248, 0x3e280, 0x3e288, 0x3e2c0, 0x3e2c8, 0x3e2fc, 0x3e600, 0x3e630, 0x3ea00, 0x3eabc, 0x3eb00, 0x3eb10, 0x3eb20, 0x3eb30, 0x3eb40, 0x3eb50, 0x3eb60, 0x3eb70, 0x3f000, 0x3f028, 0x3f030, 0x3f048, 0x3f060, 0x3f068, 0x3f070, 0x3f09c, 0x3f0f0, 0x3f128, 0x3f130, 0x3f148, 0x3f160, 0x3f168, 0x3f170, 0x3f19c, 0x3f1f0, 0x3f238, 0x3f240, 0x3f240, 0x3f248, 0x3f250, 0x3f25c, 0x3f264, 0x3f270, 0x3f2b8, 0x3f2c0, 0x3f2e4, 0x3f2f8, 0x3f338, 0x3f340, 0x3f340, 0x3f348, 0x3f350, 0x3f35c, 0x3f364, 0x3f370, 0x3f3b8, 0x3f3c0, 0x3f3e4, 0x3f3f8, 0x3f428, 0x3f430, 0x3f448, 0x3f460, 0x3f468, 0x3f470, 0x3f49c, 0x3f4f0, 0x3f528, 0x3f530, 0x3f548, 0x3f560, 0x3f568, 0x3f570, 0x3f59c, 0x3f5f0, 0x3f638, 0x3f640, 0x3f640, 0x3f648, 0x3f650, 0x3f65c, 0x3f664, 0x3f670, 0x3f6b8, 0x3f6c0, 0x3f6e4, 0x3f6f8, 0x3f738, 0x3f740, 0x3f740, 0x3f748, 0x3f750, 0x3f75c, 0x3f764, 0x3f770, 0x3f7b8, 0x3f7c0, 0x3f7e4, 0x3f7f8, 0x3f7fc, 0x3f814, 0x3f814, 0x3f82c, 0x3f82c, 0x3f880, 
0x3f88c, 0x3f8e8, 0x3f8ec, 0x3f900, 0x3f928, 0x3f930, 0x3f948, 0x3f960, 0x3f968, 0x3f970, 0x3f99c, 0x3f9f0, 0x3fa38, 0x3fa40, 0x3fa40, 0x3fa48, 0x3fa50, 0x3fa5c, 0x3fa64, 0x3fa70, 0x3fab8, 0x3fac0, 0x3fae4, 0x3faf8, 0x3fb10, 0x3fb28, 0x3fb28, 0x3fb3c, 0x3fb50, 0x3fbf0, 0x3fc10, 0x3fc28, 0x3fc28, 0x3fc3c, 0x3fc50, 0x3fcf0, 0x3fcfc, 0x40000, 0x4000c, 0x40040, 0x40050, 0x40060, 0x40068, 0x4007c, 0x4008c, 0x40094, 0x400b0, 0x400c0, 0x40144, 0x40180, 0x4018c, 0x40200, 0x40254, 0x40260, 0x40264, 0x40270, 0x40288, 0x40290, 0x40298, 0x402ac, 0x402c8, 0x402d0, 0x402e0, 0x402f0, 0x402f0, 0x40300, 0x4033c, 0x403f8, 0x403fc, 0x41304, 0x413c4, 0x41400, 0x4140c, 0x41414, 0x4141c, 0x41480, 0x414d0, 0x44000, 0x44054, 0x4405c, 0x44078, 0x440c0, 0x44174, 0x44180, 0x441ac, 0x441b4, 0x441b8, 0x441c0, 0x44254, 0x4425c, 0x44278, 0x442c0, 0x44374, 0x44380, 0x443ac, 0x443b4, 0x443b8, 0x443c0, 0x44454, 0x4445c, 0x44478, 0x444c0, 0x44574, 0x44580, 0x445ac, 0x445b4, 0x445b8, 0x445c0, 0x44654, 0x4465c, 0x44678, 0x446c0, 0x44774, 0x44780, 0x447ac, 0x447b4, 0x447b8, 0x447c0, 0x44854, 0x4485c, 0x44878, 0x448c0, 0x44974, 0x44980, 0x449ac, 0x449b4, 0x449b8, 0x449c0, 0x449fc, 0x45000, 0x45004, 0x45010, 0x45030, 0x45040, 0x45060, 0x45068, 0x45068, 0x45080, 0x45084, 0x450a0, 0x450b0, 0x45200, 0x45204, 0x45210, 0x45230, 0x45240, 0x45260, 0x45268, 0x45268, 0x45280, 0x45284, 0x452a0, 0x452b0, 0x460c0, 0x460e4, 0x47000, 0x4703c, 0x47044, 0x4708c, 0x47200, 0x47250, 0x47400, 0x47408, 0x47414, 0x47420, 0x47600, 0x47618, 0x47800, 0x47814, 0x48000, 0x4800c, 0x48040, 0x48050, 0x48060, 0x48068, 0x4807c, 0x4808c, 0x48094, 0x480b0, 0x480c0, 0x48144, 0x48180, 0x4818c, 0x48200, 0x48254, 0x48260, 0x48264, 0x48270, 0x48288, 0x48290, 0x48298, 0x482ac, 0x482c8, 0x482d0, 0x482e0, 0x482f0, 0x482f0, 0x48300, 0x4833c, 0x483f8, 0x483fc, 0x49304, 0x493c4, 0x49400, 0x4940c, 0x49414, 0x4941c, 0x49480, 0x494d0, 0x4c000, 0x4c054, 0x4c05c, 0x4c078, 0x4c0c0, 0x4c174, 0x4c180, 0x4c1ac, 0x4c1b4, 0x4c1b8, 0x4c1c0, 0x4c254, 0x4c25c, 
0x4c278, 0x4c2c0, 0x4c374, 0x4c380, 0x4c3ac, 0x4c3b4, 0x4c3b8, 0x4c3c0, 0x4c454, 0x4c45c, 0x4c478, 0x4c4c0, 0x4c574, 0x4c580, 0x4c5ac, 0x4c5b4, 0x4c5b8, 0x4c5c0, 0x4c654, 0x4c65c, 0x4c678, 0x4c6c0, 0x4c774, 0x4c780, 0x4c7ac, 0x4c7b4, 0x4c7b8, 0x4c7c0, 0x4c854, 0x4c85c, 0x4c878, 0x4c8c0, 0x4c974, 0x4c980, 0x4c9ac, 0x4c9b4, 0x4c9b8, 0x4c9c0, 0x4c9fc, 0x4d000, 0x4d004, 0x4d010, 0x4d030, 0x4d040, 0x4d060, 0x4d068, 0x4d068, 0x4d080, 0x4d084, 0x4d0a0, 0x4d0b0, 0x4d200, 0x4d204, 0x4d210, 0x4d230, 0x4d240, 0x4d260, 0x4d268, 0x4d268, 0x4d280, 0x4d284, 0x4d2a0, 0x4d2b0, 0x4e0c0, 0x4e0e4, 0x4f000, 0x4f03c, 0x4f044, 0x4f08c, 0x4f200, 0x4f250, 0x4f400, 0x4f408, 0x4f414, 0x4f420, 0x4f600, 0x4f618, 0x4f800, 0x4f814, 0x50000, 0x50084, 0x50090, 0x500cc, 0x50400, 0x50400, 0x50800, 0x50884, 0x50890, 0x508cc, 0x50c00, 0x50c00, 0x51000, 0x5101c, 0x51300, 0x51308, }; static const unsigned int t5vf_reg_ranges[] = { VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), VF_MPS_REG(A_MPS_VF_CTL), VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), FW_T4VF_MBDATA_BASE_ADDR, FW_T4VF_MBDATA_BASE_ADDR + ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), }; static const unsigned int t6_reg_ranges[] = { 0x1008, 0x101c, 0x1024, 0x10a8, 0x10b4, 0x10f8, 0x1100, 0x1114, 0x111c, 0x112c, 0x1138, 0x113c, 0x1144, 0x114c, 0x1180, 0x1184, 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11c4, 0x11fc, 0x1274, 0x1280, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, 0x3060, 0x30b0, 0x30b8, 0x30d8, 0x30e0, 0x30fc, 0x3140, 0x357c, 0x35a8, 0x35cc, 0x35ec, 0x35ec, 0x3600, 0x5624, 0x56cc, 0x56ec, 0x56f4, 0x5720, 0x5728, 0x575c, 0x580c, 0x5814, 0x5890, 0x589c, 0x58a4, 0x58ac, 0x58b8, 0x58bc, 0x5940, 0x595c, 0x5980, 0x598c, 0x59b0, 0x59c8, 0x59d0, 0x59dc, 0x59fc, 0x5a18, 0x5a60, 0x5a6c, 0x5a80, 0x5a8c, 0x5a94, 0x5a9c, 0x5b94, 0x5bfc, 0x5c10, 0x5e48, 0x5e50, 0x5e94, 0x5ea0, 0x5eb0, 0x5ec0, 0x5ec0, 
0x5ec8, 0x5ed0, 0x5ee0, 0x5ee0, 0x5ef0, 0x5ef0, 0x5f00, 0x5f00, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, 0x60a8, 0x619c, 0x7700, 0x7798, 0x77c0, 0x7880, 0x78cc, 0x78fc, 0x7b00, 0x7b58, 0x7b60, 0x7b84, 0x7b8c, 0x7c54, 0x7d00, 0x7d38, 0x7d40, 0x7d84, 0x7d8c, 0x7ddc, 0x7de4, 0x7e04, 0x7e10, 0x7e1c, 0x7e24, 0x7e38, 0x7e40, 0x7e44, 0x7e4c, 0x7e78, 0x7e80, 0x7edc, 0x7ee8, 0x7efc, 0x8dc0, 0x8de0, 0x8df8, 0x8e04, 0x8e10, 0x8e84, 0x8ea0, 0x8f88, 0x8fb8, 0x9058, 0x9060, 0x9060, 0x9068, 0x90f8, 0x9100, 0x9124, 0x9400, 0x9470, 0x9600, 0x9600, 0x9608, 0x9638, 0x9640, 0x9704, 0x9710, 0x971c, 0x9800, 0x9808, 0x9810, 0x9864, 0x9c00, 0x9c6c, 0x9c80, 0x9cec, 0x9d00, 0x9d6c, 0x9d80, 0x9dec, 0x9e00, 0x9e6c, 0x9e80, 0x9eec, 0x9f00, 0x9f6c, 0x9f80, 0xa020, 0xd000, 0xd03c, 0xd100, 0xd118, 0xd200, 0xd214, 0xd220, 0xd234, 0xd240, 0xd254, 0xd260, 0xd274, 0xd280, 0xd294, 0xd2a0, 0xd2b4, 0xd2c0, 0xd2d4, 0xd2e0, 0xd2f4, 0xd300, 0xd31c, 0xdfc0, 0xdfe0, 0xe000, 0xf008, 0xf010, 0xf018, 0xf020, 0xf028, 0x11000, 0x11014, 0x11048, 0x1106c, 0x11074, 0x11088, 0x11098, 0x11120, 0x1112c, 0x1117c, 0x11190, 0x112e0, 0x11300, 0x1130c, 0x12000, 0x1206c, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e8, 0x190f0, 0x190f8, 0x19100, 0x19110, 0x19120, 0x19124, 0x19150, 0x19194, 0x1919c, 0x191b0, 0x191d0, 0x191e8, 0x19238, 0x19290, 0x192a4, 0x192b0, 0x19348, 0x1934c, 0x193f8, 0x19418, 0x19420, 0x19428, 0x19430, 0x19444, 0x1944c, 0x1946c, 0x19474, 0x19474, 0x19490, 0x194cc, 0x194f0, 0x194f8, 0x19c00, 0x19c48, 0x19c50, 0x19c80, 0x19c94, 0x19c98, 0x19ca0, 0x19cbc, 0x19ce4, 0x19ce4, 0x19cf0, 0x19cf8, 0x19d00, 0x19d28, 0x19d50, 0x19d78, 0x19d94, 0x19d98, 0x19da0, 0x19de0, 0x19df0, 0x19e10, 0x19e50, 0x19e6c, 0x19ea0, 0x19ebc, 0x19ec4, 0x19ef4, 0x19f04, 0x19f2c, 0x19f34, 0x19f34, 0x19f40, 0x19f50, 0x19f90, 0x19fac, 0x19fc4, 0x19fc8, 0x19fd0, 0x19fe4, 0x1a000, 0x1a004, 0x1a010, 0x1a06c, 0x1a0b0, 0x1a0e4, 0x1a0ec, 0x1a0f8, 0x1a100, 0x1a108, 0x1a114, 0x1a130, 0x1a138, 0x1a1c4, 0x1a1fc, 0x1a1fc, 0x1e008, 0x1e00c, 
0x1e040, 0x1e044, 0x1e04c, 0x1e04c, 0x1e284, 0x1e290, 0x1e2c0, 0x1e2c0, 0x1e2e0, 0x1e2e0, 0x1e300, 0x1e384, 0x1e3c0, 0x1e3c8, 0x1e408, 0x1e40c, 0x1e440, 0x1e444, 0x1e44c, 0x1e44c, 0x1e684, 0x1e690, 0x1e6c0, 0x1e6c0, 0x1e6e0, 0x1e6e0, 0x1e700, 0x1e784, 0x1e7c0, 0x1e7c8, 0x1e808, 0x1e80c, 0x1e840, 0x1e844, 0x1e84c, 0x1e84c, 0x1ea84, 0x1ea90, 0x1eac0, 0x1eac0, 0x1eae0, 0x1eae0, 0x1eb00, 0x1eb84, 0x1ebc0, 0x1ebc8, 0x1ec08, 0x1ec0c, 0x1ec40, 0x1ec44, 0x1ec4c, 0x1ec4c, 0x1ee84, 0x1ee90, 0x1eec0, 0x1eec0, 0x1eee0, 0x1eee0, 0x1ef00, 0x1ef84, 0x1efc0, 0x1efc8, 0x1f008, 0x1f00c, 0x1f040, 0x1f044, 0x1f04c, 0x1f04c, 0x1f284, 0x1f290, 0x1f2c0, 0x1f2c0, 0x1f2e0, 0x1f2e0, 0x1f300, 0x1f384, 0x1f3c0, 0x1f3c8, 0x1f408, 0x1f40c, 0x1f440, 0x1f444, 0x1f44c, 0x1f44c, 0x1f684, 0x1f690, 0x1f6c0, 0x1f6c0, 0x1f6e0, 0x1f6e0, 0x1f700, 0x1f784, 0x1f7c0, 0x1f7c8, 0x1f808, 0x1f80c, 0x1f840, 0x1f844, 0x1f84c, 0x1f84c, 0x1fa84, 0x1fa90, 0x1fac0, 0x1fac0, 0x1fae0, 0x1fae0, 0x1fb00, 0x1fb84, 0x1fbc0, 0x1fbc8, 0x1fc08, 0x1fc0c, 0x1fc40, 0x1fc44, 0x1fc4c, 0x1fc4c, 0x1fe84, 0x1fe90, 0x1fec0, 0x1fec0, 0x1fee0, 0x1fee0, 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, 0x30100, 0x30168, 0x30190, 0x301a0, 0x301a8, 0x301b8, 0x301c4, 0x301c8, 0x301d0, 0x301d0, 0x30200, 0x30320, 0x30400, 0x304b4, 0x304c0, 0x3052c, 0x30540, 0x3061c, 0x30800, 0x308a0, 0x308c0, 0x30908, 0x30910, 0x309b8, 0x30a00, 0x30a04, 0x30a0c, 0x30a14, 0x30a1c, 0x30a2c, 0x30a44, 0x30a50, 0x30a74, 0x30a74, 0x30a7c, 0x30afc, 0x30b08, 0x30c24, 0x30d00, 0x30d14, 0x30d1c, 0x30d3c, 0x30d44, 0x30d4c, 0x30d54, 0x30d74, 0x30d7c, 0x30d7c, 0x30de0, 0x30de0, 0x30e00, 0x30ed4, 0x30f00, 0x30fa4, 0x30fc0, 0x30fc4, 0x31000, 0x31004, 0x31080, 0x310fc, 0x31208, 0x31220, 0x3123c, 0x31254, 0x31300, 0x31300, 0x31308, 0x3131c, 0x31338, 0x3133c, 0x31380, 0x31380, 0x31388, 0x313a8, 0x313b4, 0x313b4, 0x31400, 0x31420, 0x31438, 0x3143c, 0x31480, 0x31480, 0x314a8, 0x314a8, 0x314b0, 0x314b4, 0x314c8, 0x314d4, 0x31a40, 0x31a4c, 0x31af0, 0x31b20, 0x31b38, 0x31b3c, 
0x31b80, 0x31b80, 0x31ba8, 0x31ba8, 0x31bb0, 0x31bb4, 0x31bc8, 0x31bd4, 0x32140, 0x3218c, 0x321f0, 0x321f4, 0x32200, 0x32200, 0x32218, 0x32218, 0x32400, 0x32400, 0x32408, 0x3241c, 0x32618, 0x32620, 0x32664, 0x32664, 0x326a8, 0x326a8, 0x326ec, 0x326ec, 0x32a00, 0x32abc, 0x32b00, 0x32b18, 0x32b20, 0x32b38, 0x32b40, 0x32b58, 0x32b60, 0x32b78, 0x32c00, 0x32c00, 0x32c08, 0x32c3c, 0x33000, 0x3302c, 0x33034, 0x33050, 0x33058, 0x33058, 0x33060, 0x3308c, 0x3309c, 0x330ac, 0x330c0, 0x330c0, 0x330c8, 0x330d0, 0x330d8, 0x330e0, 0x330ec, 0x3312c, 0x33134, 0x33150, 0x33158, 0x33158, 0x33160, 0x3318c, 0x3319c, 0x331ac, 0x331c0, 0x331c0, 0x331c8, 0x331d0, 0x331d8, 0x331e0, 0x331ec, 0x33290, 0x33298, 0x332c4, 0x332e4, 0x33390, 0x33398, 0x333c4, 0x333e4, 0x3342c, 0x33434, 0x33450, 0x33458, 0x33458, 0x33460, 0x3348c, 0x3349c, 0x334ac, 0x334c0, 0x334c0, 0x334c8, 0x334d0, 0x334d8, 0x334e0, 0x334ec, 0x3352c, 0x33534, 0x33550, 0x33558, 0x33558, 0x33560, 0x3358c, 0x3359c, 0x335ac, 0x335c0, 0x335c0, 0x335c8, 0x335d0, 0x335d8, 0x335e0, 0x335ec, 0x33690, 0x33698, 0x336c4, 0x336e4, 0x33790, 0x33798, 0x337c4, 0x337e4, 0x337fc, 0x33814, 0x33814, 0x33854, 0x33868, 0x33880, 0x3388c, 0x338c0, 0x338d0, 0x338e8, 0x338ec, 0x33900, 0x3392c, 0x33934, 0x33950, 0x33958, 0x33958, 0x33960, 0x3398c, 0x3399c, 0x339ac, 0x339c0, 0x339c0, 0x339c8, 0x339d0, 0x339d8, 0x339e0, 0x339ec, 0x33a90, 0x33a98, 0x33ac4, 0x33ae4, 0x33b10, 0x33b24, 0x33b28, 0x33b38, 0x33b50, 0x33bf0, 0x33c10, 0x33c24, 0x33c28, 0x33c38, 0x33c50, 0x33cf0, 0x33cfc, 0x34000, 0x34030, 0x34100, 0x34168, 0x34190, 0x341a0, 0x341a8, 0x341b8, 0x341c4, 0x341c8, 0x341d0, 0x341d0, 0x34200, 0x34320, 0x34400, 0x344b4, 0x344c0, 0x3452c, 0x34540, 0x3461c, 0x34800, 0x348a0, 0x348c0, 0x34908, 0x34910, 0x349b8, 0x34a00, 0x34a04, 0x34a0c, 0x34a14, 0x34a1c, 0x34a2c, 0x34a44, 0x34a50, 0x34a74, 0x34a74, 0x34a7c, 0x34afc, 0x34b08, 0x34c24, 0x34d00, 0x34d14, 0x34d1c, 0x34d3c, 0x34d44, 0x34d4c, 0x34d54, 0x34d74, 0x34d7c, 0x34d7c, 0x34de0, 0x34de0, 0x34e00, 0x34ed4, 
0x34f00, 0x34fa4, 0x34fc0, 0x34fc4, 0x35000, 0x35004, 0x35080, 0x350fc, 0x35208, 0x35220, 0x3523c, 0x35254, 0x35300, 0x35300, 0x35308, 0x3531c, 0x35338, 0x3533c, 0x35380, 0x35380, 0x35388, 0x353a8, 0x353b4, 0x353b4, 0x35400, 0x35420, 0x35438, 0x3543c, 0x35480, 0x35480, 0x354a8, 0x354a8, 0x354b0, 0x354b4, 0x354c8, 0x354d4, 0x35a40, 0x35a4c, 0x35af0, 0x35b20, 0x35b38, 0x35b3c, 0x35b80, 0x35b80, 0x35ba8, 0x35ba8, 0x35bb0, 0x35bb4, 0x35bc8, 0x35bd4, 0x36140, 0x3618c, 0x361f0, 0x361f4, 0x36200, 0x36200, 0x36218, 0x36218, 0x36400, 0x36400, 0x36408, 0x3641c, 0x36618, 0x36620, 0x36664, 0x36664, 0x366a8, 0x366a8, 0x366ec, 0x366ec, 0x36a00, 0x36abc, 0x36b00, 0x36b18, 0x36b20, 0x36b38, 0x36b40, 0x36b58, 0x36b60, 0x36b78, 0x36c00, 0x36c00, 0x36c08, 0x36c3c, 0x37000, 0x3702c, 0x37034, 0x37050, 0x37058, 0x37058, 0x37060, 0x3708c, 0x3709c, 0x370ac, 0x370c0, 0x370c0, 0x370c8, 0x370d0, 0x370d8, 0x370e0, 0x370ec, 0x3712c, 0x37134, 0x37150, 0x37158, 0x37158, 0x37160, 0x3718c, 0x3719c, 0x371ac, 0x371c0, 0x371c0, 0x371c8, 0x371d0, 0x371d8, 0x371e0, 0x371ec, 0x37290, 0x37298, 0x372c4, 0x372e4, 0x37390, 0x37398, 0x373c4, 0x373e4, 0x3742c, 0x37434, 0x37450, 0x37458, 0x37458, 0x37460, 0x3748c, 0x3749c, 0x374ac, 0x374c0, 0x374c0, 0x374c8, 0x374d0, 0x374d8, 0x374e0, 0x374ec, 0x3752c, 0x37534, 0x37550, 0x37558, 0x37558, 0x37560, 0x3758c, 0x3759c, 0x375ac, 0x375c0, 0x375c0, 0x375c8, 0x375d0, 0x375d8, 0x375e0, 0x375ec, 0x37690, 0x37698, 0x376c4, 0x376e4, 0x37790, 0x37798, 0x377c4, 0x377e4, 0x377fc, 0x37814, 0x37814, 0x37854, 0x37868, 0x37880, 0x3788c, 0x378c0, 0x378d0, 0x378e8, 0x378ec, 0x37900, 0x3792c, 0x37934, 0x37950, 0x37958, 0x37958, 0x37960, 0x3798c, 0x3799c, 0x379ac, 0x379c0, 0x379c0, 0x379c8, 0x379d0, 0x379d8, 0x379e0, 0x379ec, 0x37a90, 0x37a98, 0x37ac4, 0x37ae4, 0x37b10, 0x37b24, 0x37b28, 0x37b38, 0x37b50, 0x37bf0, 0x37c10, 0x37c24, 0x37c28, 0x37c38, 0x37c50, 0x37cf0, 0x37cfc, 0x40040, 0x40040, 0x40080, 0x40084, 0x40100, 0x40100, 0x40140, 0x401bc, 0x40200, 0x40214, 0x40228, 0x40228, 
0x40240, 0x40258, 0x40280, 0x40280, 0x40304, 0x40304, 0x40330, 0x4033c, 0x41304, 0x413c8, 0x413d0, 0x413dc, 0x413f0, 0x413f0, 0x41400, 0x4140c, 0x41414, 0x4141c, 0x41480, 0x414d0, 0x44000, 0x4407c, 0x440c0, 0x441ac, 0x441b4, 0x4427c, 0x442c0, 0x443ac, 0x443b4, 0x4447c, 0x444c0, 0x445ac, 0x445b4, 0x4467c, 0x446c0, 0x447ac, 0x447b4, 0x4487c, 0x448c0, 0x449ac, 0x449b4, 0x44a7c, 0x44ac0, 0x44bac, 0x44bb4, 0x44c7c, 0x44cc0, 0x44dac, 0x44db4, 0x44e7c, 0x44ec0, 0x44fac, 0x44fb4, 0x4507c, 0x450c0, 0x451ac, 0x451b4, 0x451fc, 0x45800, 0x45804, 0x45810, 0x45830, 0x45840, 0x45860, 0x45868, 0x45868, 0x45880, 0x45884, 0x458a0, 0x458b0, 0x45a00, 0x45a04, 0x45a10, 0x45a30, 0x45a40, 0x45a60, 0x45a68, 0x45a68, 0x45a80, 0x45a84, 0x45aa0, 0x45ab0, 0x460c0, 0x460e4, 0x47000, 0x4703c, 0x47044, 0x4708c, 0x47200, 0x47250, 0x47400, 0x47408, 0x47414, 0x47420, 0x47600, 0x47618, 0x47800, 0x47814, 0x47820, 0x4782c, 0x50000, 0x50084, 0x50090, 0x500cc, 0x50300, 0x50384, 0x50400, 0x50400, 0x50800, 0x50884, 0x50890, 0x508cc, 0x50b00, 0x50b84, 0x50c00, 0x50c00, 0x51000, 0x51020, 0x51028, 0x510b0, 0x51300, 0x51324, }; static const unsigned int t6vf_reg_ranges[] = { VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), VF_MPS_REG(A_MPS_VF_CTL), VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), FW_T6VF_MBDATA_BASE_ADDR, FW_T6VF_MBDATA_BASE_ADDR + ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), }; u32 *buf_end = (u32 *)(buf + buf_size); const unsigned int *reg_ranges; int reg_ranges_size, range; unsigned int chip_version = chip_id(adap); /* * Select the right set of register ranges to dump depending on the * adapter chip type. 
*/ switch (chip_version) { case CHELSIO_T4: if (adap->flags & IS_VF) { reg_ranges = t4vf_reg_ranges; reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges); } else { reg_ranges = t4_reg_ranges; reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); } break; case CHELSIO_T5: if (adap->flags & IS_VF) { reg_ranges = t5vf_reg_ranges; reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges); } else { reg_ranges = t5_reg_ranges; reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); } break; case CHELSIO_T6: if (adap->flags & IS_VF) { reg_ranges = t6vf_reg_ranges; reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges); } else { reg_ranges = t6_reg_ranges; reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); } break; default: CH_ERR(adap, "Unsupported chip version %d\n", chip_version); return; } /* * Clear the register buffer and insert the appropriate register * values selected by the above register ranges. */ memset(buf, 0, buf_size); for (range = 0; range < reg_ranges_size; range += 2) { unsigned int reg = reg_ranges[range]; unsigned int last_reg = reg_ranges[range + 1]; u32 *bufp = (u32 *)(buf + reg); /* * Iterate across the register range filling in the register * buffer but don't write past the end of the register buffer. */ while (reg <= last_reg && bufp < buf_end) { *bufp++ = t4_read_reg(adap, reg); reg += sizeof(u32); } } } /* * Partial EEPROM Vital Product Data structure. The VPD starts with one ID * header followed by one or more VPD-R sections, each with its own header. */ struct t4_vpd_hdr { u8 id_tag; u8 id_len[2]; u8 id_data[ID_LEN]; }; struct t4_vpdr_hdr { u8 vpdr_tag; u8 vpdr_len[2]; }; /* * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 
*/ #define EEPROM_DELAY 10 /* 10us per poll spin */ #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */ #define EEPROM_STAT_ADDR 0x7bfc #define VPD_SIZE 0x800 #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 #define VPD_LEN 1024 #define VPD_INFO_FLD_HDR_SIZE 3 #define CHELSIO_VPD_UNIQUE_ID 0x82 /* * Small utility function to wait till any outstanding VPD Access is complete. * We have a per-adapter state variable "VPD Busy" to indicate when we have a * VPD Access in flight. This allows us to handle the problem of having a * previous VPD Access time out and prevent an attempt to inject a new VPD * Request before any in-flight VPD reguest has completed. */ static int t4_seeprom_wait(struct adapter *adapter) { unsigned int base = adapter->params.pci.vpd_cap_addr; int max_poll; /* * If no VPD Access is in flight, we can just return success right * away. */ if (!adapter->vpd_busy) return 0; /* * Poll the VPD Capability Address/Flag register waiting for it * to indicate that the operation is complete. */ max_poll = EEPROM_MAX_POLL; do { u16 val; udelay(EEPROM_DELAY); t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); /* * If the operation is complete, mark the VPD as no longer * busy and return success. */ if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) { adapter->vpd_busy = 0; return 0; } } while (--max_poll); /* * Failure! Note that we leave the VPD Busy status set in order to * avoid pushing a new VPD Access request into the VPD Capability till * the current operation eventually succeeds. It's a bug to issue a * new request when an existing request is in flight and will result * in corrupt hardware state. */ return -ETIMEDOUT; } /** * t4_seeprom_read - read a serial EEPROM location * @adapter: adapter to read * @addr: EEPROM virtual address * @data: where to store the read data * * Read a 32-bit word from a location in serial EEPROM using the card's PCI * VPD capability. Note that this function must be called with a virtual * address. 
*/ int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) { unsigned int base = adapter->params.pci.vpd_cap_addr; int ret; /* * VPD Accesses must alway be 4-byte aligned! */ if (addr >= EEPROMVSIZE || (addr & 3)) return -EINVAL; /* * Wait for any previous operation which may still be in flight to * complete. */ ret = t4_seeprom_wait(adapter); if (ret) { CH_ERR(adapter, "VPD still busy from previous operation\n"); return ret; } /* * Issue our new VPD Read request, mark the VPD as being busy and wait * for our request to complete. If it doesn't complete, note the * error and return it to our caller. Note that we do not reset the * VPD Busy status! */ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); adapter->vpd_busy = 1; adapter->vpd_flag = PCI_VPD_ADDR_F; ret = t4_seeprom_wait(adapter); if (ret) { CH_ERR(adapter, "VPD read of address %#x failed\n", addr); return ret; } /* * Grab the returned data, swizzle it into our endianness and * return success. */ t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); *data = le32_to_cpu(*data); return 0; } /** * t4_seeprom_write - write a serial EEPROM location * @adapter: adapter to write * @addr: virtual EEPROM address * @data: value to write * * Write a 32-bit word to a location in serial EEPROM using the card's PCI * VPD capability. Note that this function must be called with a virtual * address. */ int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) { unsigned int base = adapter->params.pci.vpd_cap_addr; int ret; u32 stats_reg; int max_poll; /* * VPD Accesses must alway be 4-byte aligned! */ if (addr >= EEPROMVSIZE || (addr & 3)) return -EINVAL; /* * Wait for any previous operation which may still be in flight to * complete. */ ret = t4_seeprom_wait(adapter); if (ret) { CH_ERR(adapter, "VPD still busy from previous operation\n"); return ret; } /* * Issue our new VPD Read request, mark the VPD as being busy and wait * for our request to complete. 
If it doesn't complete, note the * error and return it to our caller. Note that we do not reset the * VPD Busy status! */ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, cpu_to_le32(data)); t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr | PCI_VPD_ADDR_F); adapter->vpd_busy = 1; adapter->vpd_flag = 0; ret = t4_seeprom_wait(adapter); if (ret) { CH_ERR(adapter, "VPD write of address %#x failed\n", addr); return ret; } /* * Reset PCI_VPD_DATA register after a transaction and wait for our * request to complete. If it doesn't complete, return error. */ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); max_poll = EEPROM_MAX_POLL; do { udelay(EEPROM_DELAY); t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); } while ((stats_reg & 0x1) && --max_poll); if (!max_poll) return -ETIMEDOUT; /* Return success! */ return 0; } /** * t4_eeprom_ptov - translate a physical EEPROM address to virtual * @phys_addr: the physical EEPROM address * @fn: the PCI function number * @sz: size of function-specific area * * Translate a physical EEPROM address to virtual. The first 1K is * accessed through virtual addresses starting at 31K, the rest is * accessed through virtual addresses starting at 0. * * The mapping is as follows: * [0..1K) -> [31K..32K) * [1K..1K+A) -> [ES-A..ES) * [1K+A..ES) -> [0..ES-A-1K) * * where A = @fn * @sz, and ES = EEPROM size. */ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) { fn *= sz; if (phys_addr < 1024) return phys_addr + (31 << 10); if (phys_addr < 1024 + fn) return EEPROMSIZE - fn + phys_addr - 1024; if (phys_addr < EEPROMSIZE) return phys_addr - 1024 - fn; return -EINVAL; } /** * t4_seeprom_wp - enable/disable EEPROM write protection * @adapter: the adapter * @enable: whether to enable or disable write protection * * Enables or disables write protection on the serial EEPROM. */ int t4_seeprom_wp(struct adapter *adapter, int enable) { return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 
0xc : 0);
}

/**
 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
 *	@vpd: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *	@region: VPD region to search (starting from 0)
 *
 *	Returns the offset of the information field keyword's value within
 *	@vpd, or -ENOENT if the keyword (or the requested region) is not
 *	found.
 */
static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
{
	int i, tag;
	unsigned int offset, len;
	const struct t4_vpdr_hdr *vpdr;

	/* Start just past the ID header and walk VPD-R regions. */
	offset = sizeof(struct t4_vpd_hdr);
	vpdr = (const void *)(vpd + offset);
	tag = vpdr->vpdr_tag;
	/* Region length is a 16-bit little-endian byte pair. */
	len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
	while (region--) {
		offset += sizeof(struct t4_vpdr_hdr) + len;
		vpdr = (const void *)(vpd + offset);
		/* Consecutive regions are expected to carry consecutive
		 * tags; anything else means the region isn't there. */
		if (++tag != vpdr->vpdr_tag)
			return -ENOENT;
		len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
	}
	offset += sizeof(struct t4_vpdr_hdr);

	if (offset + len > VPD_LEN) {
		return -ENOENT;
	}

	/* Scan keyword entries: 2-byte keyword, 1-byte length, then data. */
	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(vpd + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}
		i += VPD_INFO_FLD_HDR_SIZE + vpd[i + 2];
	}

	return -ENOENT;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@vpd: caller provided temporary space to read the VPD into
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
			  uint16_t device_id, u32 *buf)
{
	int i, ret, addr;
	int ec, sn, pn, na, md;
	u8 csum;
	const u8 *vpd = (const u8 *)buf;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, buf);
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82.  The first byte of a
	 * VPD shall be CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming
	 * software is expected to automatically put this entry at the
	 * beginning of the VPD.
 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, buf++);
		if (ret)
			return ret;
	}

/* Look up a keyword in region 0 and bail out if it's missing. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(vpd, name, 0); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/* "RV" holds the checksum byte; all bytes up to and including it
	 * must sum to zero (mod 256). */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];
	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	/* The byte just before each keyword's data is its length. */
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	if (device_id & 0x80)
		return 0;	/* Custom card */

	/* "VF" (MFG diags version) lives in VPD region 1 and may be
	 * absent on older cards. */
	md = get_vpd_keyword_val(vpd, "VF", 1);
	if (md < 0) {
		snprintf(p->md, sizeof(p->md), "unknown");
	} else {
		i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
		memcpy(p->md, vpd + md, min(i, MD_LEN));
		strstrip((char *)p->md);
	}

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,	/* program 256B page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID        = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase 64KB sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock
SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Data must be staged in A_SF_DATA before kicking off the op. */
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		/* Issue the RD_STATUS opcode, then read back the status
		 * byte; bit 0 is the flash's busy/WIP indication. */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size ||
	    (addr & 3))
		return -EINVAL;

	/* 24-bit address goes out MSB-first, followed by the opcode. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as byte stream
 *	(i.e. matches what on disk), otherwise in big-endian.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out 4 bytes at a time, packing bytes into a
	 * word MSB-first. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* data was advanced past the payload above; back up n to compare
	 * against what we just read back. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
			     1, vers, 0);
}

/**
 *	t4_get_fw_hdr - read the firmware header
 *	@adapter: the adapter
 *	@hdr: where to place the version
 *
 *	Reads the FW header from flash into caller provided buffer.
 */
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
{
	return t4_read_flash(adapter, FLASH_FW_START,
	    sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
}

/**
 *	t4_get_bs_version - read the firmware bootstrap version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW Bootstrap version from flash.
 */
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_exprom_version - return the Expansion ROM version (if any)
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the Expansion ROM header from FLASH and returns the version
 *	number (if present) through the @vers return value pointer.  We return
 *	this in the Firmware Version Format since it's convenient.  Return
 *	0 on success, -ENOENT if no Expansion ROM is present.
 */
int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;

	ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf),
			    exprom_header_buf, 0);
	if (ret)
		return ret;

	hdr = (struct exprom_header *)exprom_header_buf;
	/* Expansion ROMs begin with the 0x55 0xaa signature. */
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
	return 0;
}

/**
 *	t4_get_scfg_version - return the Serial Configuration version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the Serial Configuration Version via the Firmware interface
 *	(thus this can only be called once we're ready to issue Firmware
 *	commands).  The format of the Serial Configuration version is
 *	adapter specific.
Returns 0 on success, an error on failure.
 *
 *	Note that early versions of the Firmware didn't include the ability
 *	to retrieve the Serial Configuration version, so we zero-out the
 *	return-value parameter in that case to avoid leaving it with
 *	garbage in it.
 *
 *	Also note that the Firmware will return its cached copy of the Serial
 *	Initialization Revision ID, not the actual Revision ID as written in
 *	the Serial EEPROM.  This is only an issue if a new VPD has been written
 *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
 *	it's best to defer calling this routine till after a FW_RESET_CMD has
 *	been issued if the Host Driver will be performing a full adapter
 *	initialization.
 */
int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
{
	u32 scfgrev_param;
	int ret;

	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &scfgrev_param, vers);
	if (ret)
		*vers = 0;	/* don't leave garbage on failure */
	return ret;
}

/**
 *	t4_get_vpd_version - return the VPD version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the VPD via the Firmware interface (thus this can only be called
 *	once we're ready to issue Firmware commands).  The format of the
 *	VPD version is adapter specific.  Returns 0 on success, an error on
 *	failure.
 *
 *	Note that early versions of the Firmware didn't include the ability
 *	to retrieve the VPD version, so we zero-out the return-value parameter
 *	in that case to avoid leaving it with garbage in it.
 *
 *	Also note that the Firmware will return its cached copy of the VPD
 *	Revision ID, not the actual Revision ID as written in the Serial
 *	EEPROM.  This is only an issue if a new VPD has been written and the
 *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
 *	to defer calling this routine till after a FW_RESET_CMD has been issued
 *	if the Host Driver will be performing a full adapter initialization.
 */
int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
{
	u32 vpdrev_param;
	int ret;

	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &vpdrev_param, vers);
	if (ret)
		*vers = 0;	/* don't leave garbage on failure */
	return ret;
}

/**
 *	t4_get_version_info - extract various chip/firmware version information
 *	@adapter: the adapter
 *
 *	Reads various chip/firmware version numbers and stores them into the
 *	adapter Adapter Parameters structure.  If any of the efforts fails
 *	the first failure will be returned, but all of the version numbers
 *	will be read.
 */
int t4_get_version_info(struct adapter *adapter)
{
	int ret = 0;

/* Remember only the first failure but keep reading the rest. */
#define FIRST_RET(__getvinfo) \
	do { \
		int __ret = __getvinfo; \
		if (__ret && !ret) \
			ret = __ret; \
	} while (0)

	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));

#undef FIRST_RET

	return ret;
}

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		/* WR_ENABLE, then ERASE_SECTOR with the sector number in
		 * the address field, then wait for the (slow) erase. */
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/*
 * Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static int t4_fw_matches_chip(struct adapter *adap,
			      const struct fw_hdr *hdr)
{
	/*
	 * The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
		return 1;

	CH_ERR(adap,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, chip_id(adap));
	return 0;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
*/ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) { u32 csum; int ret, addr; unsigned int i; u8 first_page[SF_PAGE_SIZE]; const u32 *p = (const u32 *)fw_data; const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; unsigned int fw_start_sec; unsigned int fw_start; unsigned int fw_size; if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) { fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC; fw_start = FLASH_FWBOOTSTRAP_START; fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE; } else { fw_start_sec = FLASH_FW_START_SEC; fw_start = FLASH_FW_START; fw_size = FLASH_FW_MAX_SIZE; } if (!size) { CH_ERR(adap, "FW image has no data\n"); return -EINVAL; } if (size & 511) { CH_ERR(adap, "FW image size not multiple of 512 bytes\n"); return -EINVAL; } if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) { CH_ERR(adap, "FW image size differs from size in FW header\n"); return -EINVAL; } if (size > fw_size) { CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size); return -EFBIG; } if (!t4_fw_matches_chip(adap, hdr)) return -EINVAL; for (csum = 0, i = 0; i < size / sizeof(csum); i++) csum += be32_to_cpu(p[i]); if (csum != 0xffffffff) { CH_ERR(adap, "corrupted firmware image, checksum %#x\n", csum); return -EINVAL; } i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); if (ret) goto out; /* * We write the correct version at the end so the driver can see a bad * version if the FW write fails. Start by writing a copy of the * first page with a bad version. 
 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally, write the real version into the first page's header. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op  : the operation (flush or flush and invalidate)
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
			F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			V_FW_PARAMS_CMD_PFN(adap->pf) |
			V_FW_PARAMS_CMD_VFN(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}

/*
 * Dump the CIM PIF logic analyzer request/response buffers into the caller's
 * arrays and optionally report the current write pointers.  Briefly disables
 * LA capture (F_LADBGEN) while reading and restores the original config.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG,
				     V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req =
(req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);	/* restore config */
}

/*
 * Dump the CIM MA logic analyzer request/response buffers into the caller's
 * arrays.  Briefly disables LA capture (F_LADBGEN) while reading and
 * restores the original debug configuration afterwards.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG,
				     V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);	/* restore config */
}

/*
 * Read the ULP_RX logic analyzer into la_buf.  The buffer is filled
 * column-interleaved: entry j of channel i lands at la_buf[i + 8*j].
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

/**
 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 *	@caps16: a 16-bit Port Capabilities value
 *
 *	Returns the equivalent 32-bit Port Capabilities value.
 */
static uint32_t fwcaps16_to_caps32(uint16_t caps16)
{
	uint32_t caps32 = 0;

#define CAP16_TO_CAP32(__cap) \
	do { \
		if (caps16 & FW_PORT_CAP_##__cap) \
			caps32 |= FW_PORT_CAP32_##__cap; \
	} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(FORCE_PAUSE);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(MDISTRAIGHT);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

#undef CAP16_TO_CAP32

	return caps32;
}

/**
 *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
 *	@caps32: a 32-bit Port Capabilities value
 *
 *	Returns the equivalent 16-bit Port Capabilities value.
Note that
 *	not all 32-bit Port Capabilities can be represented in the 16-bit
 *	Port Capabilities and some fields/values may not make it.
 */
static uint16_t fwcaps32_to_caps16(uint32_t caps32)
{
	uint16_t caps16 = 0;

#define CAP32_TO_CAP16(__cap) \
	do { \
		if (caps32 & FW_PORT_CAP32_##__cap) \
			caps16 |= FW_PORT_CAP_##__cap; \
	} while (0)

	CAP32_TO_CAP16(SPEED_100M);
	CAP32_TO_CAP16(SPEED_1G);
	CAP32_TO_CAP16(SPEED_10G);
	CAP32_TO_CAP16(SPEED_25G);
	CAP32_TO_CAP16(SPEED_40G);
	CAP32_TO_CAP16(SPEED_100G);
	CAP32_TO_CAP16(FC_RX);
	CAP32_TO_CAP16(FC_TX);
	CAP32_TO_CAP16(802_3_PAUSE);
	CAP32_TO_CAP16(802_3_ASM_DIR);
	CAP32_TO_CAP16(ANEG);
	CAP32_TO_CAP16(FORCE_PAUSE);
	CAP32_TO_CAP16(MDIAUTO);
	CAP32_TO_CAP16(MDISTRAIGHT);
	CAP32_TO_CAP16(FEC_RS);
	CAP32_TO_CAP16(FEC_BASER_RS);

#undef CAP32_TO_CAP16

	return caps16;
}

/* True if the port is one of the BASE-T (copper) port types. */
static bool
is_bt(struct port_info *pi)
{

	return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
	    pi->port_type == FW_PORT_TYPE_BT_XFI ||
	    pi->port_type == FW_PORT_TYPE_BT_XAUI);
}

/*
 * Translate the FEC bits of a firmware caps32 value to the driver's FEC_*
 * flags.  If no FEC bits are set, returns FEC_NONE or 0 depending on
 * @unset_means_none.
 */
static int8_t fwcap_to_fec(uint32_t caps, bool unset_means_none)
{
	int8_t fec = 0;

	if ((caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) == 0)
		return (unset_means_none ? FEC_NONE : 0);

	if (caps & FW_PORT_CAP32_FEC_RS)
		fec |= FEC_RS;
	if (caps & FW_PORT_CAP32_FEC_BASER_RS)
		fec |= FEC_BASER_RS;
	if (caps & FW_PORT_CAP32_FEC_NO_FEC)
		fec |= FEC_NONE;

	return (fec);
}

/*
 * Translate driver FEC_* flags to firmware caps32 FEC bits.
 * Note that 0 is not translated to NO_FEC.
 */
static uint32_t fec_to_fwcap(int8_t fec)
{
	uint32_t caps = 0;

	/* Only real FECs allowed. */
	MPASS((fec & ~M_FW_PORT_CAP32_FEC) == 0);

	if (fec & FEC_RS)
		caps |= FW_PORT_CAP32_FEC_RS;
	if (fec & FEC_BASER_RS)
		caps |= FW_PORT_CAP32_FEC_BASER_RS;
	if (fec & FEC_NONE)
		caps |= FW_PORT_CAP32_FEC_NO_FEC;

	return (caps);
}

/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
* - If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
	unsigned int aneg, fc, fec, speed, rcap;

	/* Pause frame configuration.  FORCE_PAUSE is set when the
	 * requested pause settings should not be autonegotiated. */
	fc = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP32_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP32_FC_TX;
	if (!(lc->requested_fc & PAUSE_AUTONEG))
		fc |= FW_PORT_CAP32_FORCE_PAUSE;

	/* Autonegotiation: explicit request, else whatever the port
	 * capabilities allow. */
	if (lc->requested_aneg == AUTONEG_DISABLE)
		aneg = 0;
	else if (lc->requested_aneg == AUTONEG_ENABLE)
		aneg = FW_PORT_CAP32_ANEG;
	else
		aneg = lc->pcaps & FW_PORT_CAP32_ANEG;

	/* With aneg advertise all supported speeds; otherwise pick the
	 * requested speed or fall back to the port's top speed. */
	if (aneg) {
		speed = lc->pcaps &
		    V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
	} else if (lc->requested_speed != 0)
		speed = speed_to_fwcap(lc->requested_speed);
	else
		speed = fwcap_top_speed(lc->pcaps);

	fec = 0;
	if (fec_supported(speed)) {
		if (lc->requested_fec == FEC_AUTO) {
			if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC) {
				if (speed & FW_PORT_CAP32_SPEED_100G) {
					/* BASE-R is not offered at 100G. */
					fec |= FW_PORT_CAP32_FEC_RS;
					fec |= FW_PORT_CAP32_FEC_NO_FEC;
				} else {
					fec |= FW_PORT_CAP32_FEC_RS;
					fec |= FW_PORT_CAP32_FEC_BASER_RS;
					fec |= FW_PORT_CAP32_FEC_NO_FEC;
				}
			} else {
				/* Set only 1b with old firmwares. */
				fec |= fec_to_fwcap(lc->fec_hint);
			}
		} else {
			fec |= fec_to_fwcap(lc->requested_fec &
			    M_FW_PORT_CAP32_FEC);
			if (lc->requested_fec & FEC_MODULE)
				fec |= fec_to_fwcap(lc->fec_hint);
		}

		if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
			fec |= FW_PORT_CAP32_FORCE_FEC;
		else if (fec == FW_PORT_CAP32_FEC_NO_FEC)
			fec = 0;
	}

	/* Force AN on for BT cards.
 */
	if (is_bt(adap->port[adap->chan_map[port]]))
		aneg = lc->pcaps & FW_PORT_CAP32_ANEG;

	rcap = aneg | speed | fc | fec;

	/* Never request anything the port doesn't support. */
	if ((rcap | lc->pcaps) != lc->pcaps) {
#ifdef INVARIANTS
		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x, removed 0x%x\n", rcap,
		    lc->pcaps, rcap & (rcap ^ lc->pcaps));
#endif
		rcap &= lc->pcaps;
	}
	rcap |= mdi;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	/* Use the 32-bit L1 config form if the firmware supports it. */
	if (adap->params.port_caps32) {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
			FW_LEN16(c));
		c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	} else {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));
		c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	}

	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));
	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/* Human-readable description for a set of interrupt cause bits. */
struct intr_details {
	u32 mask;
	const char *msg;
};

/* Handler to invoke when any of the cause bits in mask is raised. */
struct intr_action {
	u32 mask;
	int arg;
	bool (*action)(struct adapter *, int, bool);
};

/* Flag: treat fatal bits as non-fatal when they're masked in INT_ENABLE. */
#define NONFATAL_IF_DISABLED 1
struct intr_info {
	const char *name;	/* name of the INT_CAUSE register */
	int cause_reg;		/* INT_CAUSE register */
	int enable_reg;		/* INT_ENABLE register */
	u32 fatal;		/* bits that are fatal */
	int flags;		/* hints */
	const struct intr_details *details;
	const struct intr_action *actions;
};

/* '!' = fatal bits set, '*' = enabled bits set, '-' = neither. */
static inline char
intr_alert_char(u32 cause, u32 enable, u32 fatal)
{

	if (cause & fatal)
		return ('!');
	if (cause & enable)
		return ('*');
	return ('-');
}

/* Log an interrupt cause register and decode its individual bits. */
static void
t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
{
	u32 enable, fatal, leftover;
	const struct intr_details *details;
	char alert;

	enable = t4_read_reg(adap, ii->enable_reg);
	if (ii->flags & NONFATAL_IF_DISABLED)
		fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
	else
		fatal = ii->fatal;
	alert = intr_alert_char(cause, enable, fatal);
	CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
	    alert, ii->name, ii->cause_reg, cause, enable, fatal);

	/* Decode every known bit; anything left over is reported raw. */
	leftover = cause;
	for (details = ii->details; details && details->mask != 0; details++) {
		u32 msgbits = details->mask & cause;
		if (msgbits == 0)
			continue;
		alert = intr_alert_char(msgbits, enable, ii->fatal);
		CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits,
		    details->msg);
		leftover &= ~msgbits;
	}
	if (leftover != 0 && leftover != cause)
		CH_ALERT(adap, " ? [0x%08x]\n", leftover);
}

/*
 * Returns true for fatal error.
 */
static bool
t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
    u32 additional_cause, bool verbose)
{
	u32 cause, fatal;
	bool rc;
	const struct intr_action *action;

	/*
	 * Read and display cause.  Note that the top level PL_INT_CAUSE is a
	 * bit special and we need to completely ignore the bits that are not
	 * in PL_INT_ENABLE.
	 */
	cause = t4_read_reg(adap, ii->cause_reg);
	if (ii->cause_reg == A_PL_INT_CAUSE)
		cause &= t4_read_reg(adap, ii->enable_reg);
	if (verbose || cause != 0)
		t4_show_intr_info(adap, ii, cause);
	fatal = cause & ii->fatal;
	if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
		fatal &= t4_read_reg(adap, ii->enable_reg);
	cause |= additional_cause;
	if (cause == 0)
		return (false);

	rc = fatal != 0;
	/* Run every registered action whose mask overlaps the cause;
	 * an action may itself report a fatal condition. */
	for (action = ii->actions; action && action->mask != 0; action++) {
		if (!(action->mask & cause))
			continue;
		rc |= (action->action)(adap, action->arg, verbose);
	}

	/* clear */
	t4_write_reg(adap, ii->cause_reg, cause);
	/* read back to flush the write before returning */
	(void)t4_read_reg(adap, ii->cause_reg);

	return (rc);
}

/*
 * Interrupt handler for the PCIE module.
*/ static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_details sysbus_intr_details[] = { { F_RNPP, "RXNP array parity error" }, { F_RPCP, "RXPC array parity error" }, { F_RCIP, "RXCIF array parity error" }, { F_RCCP, "Rx completions control array parity error" }, { F_RFTP, "RXFT array parity error" }, { 0 } }; static const struct intr_info sysbus_intr_info = { .name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS", .cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, .enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE, .fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP, .flags = 0, .details = sysbus_intr_details, .actions = NULL, }; static const struct intr_details pcie_port_intr_details[] = { { F_TPCP, "TXPC array parity error" }, { F_TNPP, "TXNP array parity error" }, { F_TFTP, "TXFT array parity error" }, { F_TCAP, "TXCA array parity error" }, { F_TCIP, "TXCIF array parity error" }, { F_RCAP, "RXCA array parity error" }, { F_OTDD, "outbound request TLP discarded" }, { F_RDPE, "Rx data parity error" }, { F_TDUE, "Tx uncorrectable data error" }, { 0 } }; static const struct intr_info pcie_port_intr_info = { .name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS", .cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, .enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE, .fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP | F_OTDD | F_RDPE | F_TDUE, .flags = 0, .details = pcie_port_intr_details, .actions = NULL, }; static const struct intr_details pcie_intr_details[] = { { F_MSIADDRLPERR, "MSI AddrL parity error" }, { F_MSIADDRHPERR, "MSI AddrH parity error" }, { F_MSIDATAPERR, "MSI data parity error" }, { F_MSIXADDRLPERR, "MSI-X AddrL parity error" }, { F_MSIXADDRHPERR, "MSI-X AddrH parity error" }, { F_MSIXDATAPERR, "MSI-X data parity error" }, { F_MSIXDIPERR, "MSI-X DI parity error" }, { F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" }, { F_PIOREQPERR, "PCIe PIO request FIFO parity error" 
}, { F_TARTAGPERR, "PCIe target tag FIFO parity error" }, { F_CCNTPERR, "PCIe CMD channel count parity error" }, { F_CREQPERR, "PCIe CMD channel request parity error" }, { F_CRSPPERR, "PCIe CMD channel response parity error" }, { F_DCNTPERR, "PCIe DMA channel count parity error" }, { F_DREQPERR, "PCIe DMA channel request parity error" }, { F_DRSPPERR, "PCIe DMA channel response parity error" }, { F_HCNTPERR, "PCIe HMA channel count parity error" }, { F_HREQPERR, "PCIe HMA channel request parity error" }, { F_HRSPPERR, "PCIe HMA channel response parity error" }, { F_CFGSNPPERR, "PCIe config snoop FIFO parity error" }, { F_FIDPERR, "PCIe FID parity error" }, { F_INTXCLRPERR, "PCIe INTx clear parity error" }, { F_MATAGPERR, "PCIe MA tag parity error" }, { F_PIOTAGPERR, "PCIe PIO tag parity error" }, { F_RXCPLPERR, "PCIe Rx completion parity error" }, { F_RXWRPERR, "PCIe Rx write parity error" }, { F_RPLPERR, "PCIe replay buffer parity error" }, { F_PCIESINT, "PCIe core secondary fault" }, { F_PCIEPINT, "PCIe core primary fault" }, { F_UNXSPLCPLERR, "PCIe unexpected split completion error" }, { 0 } }; static const struct intr_details t5_pcie_intr_details[] = { { F_IPGRPPERR, "Parity errors observed by IP" }, { F_NONFATALERR, "PCIe non-fatal error" }, { F_READRSPERR, "Outbound read error" }, { F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" }, { F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" }, { F_IPRETRYPERR, "PCIe IP replay buffer parity error" }, { F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" }, { F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" }, { F_PIOTAGQPERR, "PIO tag queue FIFO parity error" }, { F_MAGRPPERR, "MA group FIFO parity error" }, { F_VFIDPERR, "VFID SRAM parity error" }, { F_FIDPERR, "FID SRAM parity error" }, { F_CFGSNPPERR, "config snoop FIFO parity error" }, { F_HRSPPERR, "HMA channel response data SRAM parity error" }, { F_HREQRDPERR, "HMA channel read request SRAM parity error" }, { F_HREQWRPERR, "HMA 
channel write request SRAM parity error" }, { F_DRSPPERR, "DMA channel response data SRAM parity error" }, { F_DREQRDPERR, "DMA channel write request SRAM parity error" }, { F_CRSPPERR, "CMD channel response data SRAM parity error" }, { F_CREQRDPERR, "CMD channel read request SRAM parity error" }, { F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" }, { F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" }, { F_PIOREQGRPPERR, "PIO request group FIFOs parity error" }, { F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" }, { F_MSIXDIPERR, "MSI-X DI SRAM parity error" }, { F_MSIXDATAPERR, "MSI-X data SRAM parity error" }, { F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" }, { F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" }, { F_MSIXSTIPERR, "MSI-X STI SRAM parity error" }, { F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" }, { F_MSTGRPPERR, "Master response read queue SRAM parity error" }, { 0 } }; struct intr_info pcie_intr_info = { .name = "PCIE_INT_CAUSE", .cause_reg = A_PCIE_INT_CAUSE, .enable_reg = A_PCIE_INT_ENABLE, .fatal = 0xffffffff, .flags = NONFATAL_IF_DISABLED, .details = NULL, .actions = NULL, }; bool fatal = false; if (is_t4(adap)) { fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose); pcie_intr_info.details = pcie_intr_details; } else { pcie_intr_info.details = t5_pcie_intr_details; } fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose); return (fatal); } /* * TP interrupt handler. 
 */
static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details tp_intr_details[] = {
		{ 0x3fffffff, "TP parity error" },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
		{ 0 }
	};
	static const struct intr_info tp_intr_info = {
		.name = "TP_INT_CAUSE",
		.cause_reg = A_TP_INT_CAUSE,
		.enable_reg = A_TP_INT_ENABLE,
		.fatal = 0x7fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = tp_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
}

/*
 * SGE interrupt handler.
 */
static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* SGE_INT_CAUSE1/2 have no per-bit decode table; raw value only. */
	static const struct intr_info sge_int1_info = {
		.name = "SGE_INT_CAUSE1",
		.cause_reg = A_SGE_INT_CAUSE1,
		.enable_reg = A_SGE_INT_ENABLE1,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int2_info = {
		.name = "SGE_INT_CAUSE2",
		.cause_reg = A_SGE_INT_CAUSE2,
		.enable_reg = A_SGE_INT_ENABLE2,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* SGE_INT_CAUSE3 decode for T4/T5. */
	static const struct intr_details sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
			"DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
			"Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
			"SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
			"SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
			"Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
			"Egress context manager priority user error" },
		{ F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
		{ F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/*
	 * SGE_INT_CAUSE3 decode for T6+: drops the DBFIFO threshold bits
	 * and adds DBP_TBUF_FULL / FATAL_WRE_LEN.
	 */
	static const struct intr_details t6_sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
			"DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
			"Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
			"SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
			"SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
			"Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
			"Egress context manager priority user error" },
		{ F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
		{ F_FATAL_WRE_LEN,
			"SGE WRE packet less than advertized length" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* Not const: .details is selected per-chip below. */
	struct intr_info sge_int3_info = {
		.name = "SGE_INT_CAUSE3",
		.cause_reg = A_SGE_INT_CAUSE3,
		.enable_reg = A_SGE_INT_ENABLE3,
		.fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int4_info = {
		.name = "SGE_INT_CAUSE4",
		.cause_reg = A_SGE_INT_CAUSE4,
		.enable_reg = A_SGE_INT_ENABLE4,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	/* SGE_INT_CAUSE5 exists on T5+, CAUSE6 on T6+ (see below). */
	static const struct intr_info sge_int5_info = {
		.name = "SGE_INT_CAUSE5",
		.cause_reg = A_SGE_INT_CAUSE5,
		.enable_reg = A_SGE_INT_ENABLE5,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int6_info = {
		.name = "SGE_INT_CAUSE6",
		.cause_reg = A_SGE_INT_CAUSE6,
		.enable_reg = A_SGE_INT_ENABLE6,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal;
	u32 v;

	if (chip_id(adap) <= CHELSIO_T5) {
		sge_int3_info.details = sge_int3_details;
	} else {
		sge_int3_info.details = t6_sge_int3_details;
	}

	fatal = false;
	fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
	if (chip_id(adap) >= CHELSIO_T5)
		fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
	if (chip_id(adap) >= CHELSIO_T6)
		fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);

	/* Report and clear the captured error QID, if any. */
	v = t4_read_reg(adap, A_SGE_ERROR_STATS);
	if (v & F_ERROR_QID_VALID) {
		CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
		if (v & F_UNCAPTURED_ERROR)
			CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adap, A_SGE_ERROR_STATS,
F_ERROR_QID_VALID | F_UNCAPTURED_ERROR); } return (fatal); } /* * CIM interrupt handler. */ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose) { static const struct intr_action cim_host_intr_actions[] = { { F_TIMER0INT, 0, t4_os_dump_cimla }, { 0 }, }; static const struct intr_details cim_host_intr_details[] = { /* T6+ */ { F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" }, /* T5+ */ { F_MA_CIM_INTFPERR, "MA2CIM interface parity error" }, { F_PLCIM_MSTRSPDATAPARERR, "PL2CIM master response data parity error" }, { F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" }, { F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" }, { F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" }, { F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" }, { F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" }, { F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" }, /* T4+ */ { F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" }, { F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" }, { F_MBHOSTPARERR, "CIM mailbox host read parity error" }, { F_MBUPPARERR, "CIM mailbox uP parity error" }, { F_IBQTP0PARERR, "CIM IBQ TP0 parity error" }, { F_IBQTP1PARERR, "CIM IBQ TP1 parity error" }, { F_IBQULPPARERR, "CIM IBQ ULP parity error" }, { F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" }, { F_IBQSGEHIPARERR | F_IBQPCIEPARERR, /* same bit */ "CIM IBQ PCIe/SGE_HI parity error" }, { F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" }, { F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" }, { F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" }, { F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" }, { F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" }, { F_OBQSGEPARERR, "CIM OBQ SGE parity error" }, { F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" }, { F_TIMER1INT, "CIM TIMER0 interrupt" }, { F_TIMER0INT, "CIM TIMER0 interrupt" }, { F_PREFDROPINT, "CIM control register prefetch drop" }, { 0} }; static const struct intr_info cim_host_intr_info = { 
.name = "CIM_HOST_INT_CAUSE", .cause_reg = A_CIM_HOST_INT_CAUSE, .enable_reg = A_CIM_HOST_INT_ENABLE, .fatal = 0x007fffe6, .flags = NONFATAL_IF_DISABLED, .details = cim_host_intr_details, .actions = cim_host_intr_actions, }; static const struct intr_details cim_host_upacc_intr_details[] = { { F_EEPROMWRINT, "CIM EEPROM came out of busy state" }, { F_TIMEOUTMAINT, "CIM PIF MA timeout" }, { F_TIMEOUTINT, "CIM PIF timeout" }, { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" }, { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" }, { F_BLKWRPLINT, "CIM block write to PL space" }, { F_BLKRDPLINT, "CIM block read from PL space" }, { F_SGLWRPLINT, "CIM single write to PL space with illegal BEs" }, { F_SGLRDPLINT, "CIM single read from PL space with illegal BEs" }, { F_BLKWRCTLINT, "CIM block write to CTL space" }, { F_BLKRDCTLINT, "CIM block read from CTL space" }, { F_SGLWRCTLINT, "CIM single write to CTL space with illegal BEs" }, { F_SGLRDCTLINT, "CIM single read from CTL space with illegal BEs" }, { F_BLKWREEPROMINT, "CIM block write to EEPROM space" }, { F_BLKRDEEPROMINT, "CIM block read from EEPROM space" }, { F_SGLWREEPROMINT, "CIM single write to EEPROM space with illegal BEs" }, { F_SGLRDEEPROMINT, "CIM single read from EEPROM space with illegal BEs" }, { F_BLKWRFLASHINT, "CIM block write to flash space" }, { F_BLKRDFLASHINT, "CIM block read from flash space" }, { F_SGLWRFLASHINT, "CIM single write to flash space" }, { F_SGLRDFLASHINT, "CIM single read from flash space with illegal BEs" }, { F_BLKWRBOOTINT, "CIM block write to boot space" }, { F_BLKRDBOOTINT, "CIM block read from boot space" }, { F_SGLWRBOOTINT, "CIM single write to boot space" }, { F_SGLRDBOOTINT, "CIM single read from boot space with illegal BEs" }, { F_ILLWRBEINT, "CIM illegal write BEs" }, { F_ILLRDBEINT, "CIM illegal read BEs" }, { F_ILLRDINT, "CIM illegal read" }, { F_ILLWRINT, "CIM illegal write" }, { F_ILLTRANSINT, "CIM illegal transaction" }, { F_RSVDSPACEINT, "CIM reserved space access" 
}, {0} }; static const struct intr_info cim_host_upacc_intr_info = { .name = "CIM_HOST_UPACC_INT_CAUSE", .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE, .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE, .fatal = 0x3fffeeff, .flags = NONFATAL_IF_DISABLED, .details = cim_host_upacc_intr_details, .actions = NULL, }; static const struct intr_info cim_pf_host_intr_info = { .name = "CIM_PF_HOST_INT_CAUSE", .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE), .fatal = 0, .flags = 0, .details = NULL, .actions = NULL, }; u32 val, fw_err; bool fatal; fw_err = t4_read_reg(adap, A_PCIE_FW); if (fw_err & F_PCIE_FW_ERR) t4_report_fw_error(adap); /* * When the Firmware detects an internal error which normally wouldn't * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order * to make sure the Host sees the Firmware Crash. So if we have a * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0 * interrupt. */ val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE); if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) || G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) { t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT); } fatal = false; fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose); fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose); return (fatal); } /* * ULP RX interrupt handler. 
 */
static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulprx_intr_details[] = {
		/* T5+ */
		{ F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" },
		{ F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" },

		/* T4+ */
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error" },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error" },
		{ 0x007fffff, "ULPRX parity error" },
		{ 0 }
	};
	static const struct intr_info ulprx_intr_info = {
		.name = "ULP_RX_INT_CAUSE",
		.cause_reg = A_ULP_RX_INT_CAUSE,
		.enable_reg = A_ULP_RX_INT_ENABLE,
		.fatal = 0x07ffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulprx_intr_details,
		.actions = NULL,
	};
	/* Second cause register; no decode table, nothing treated as fatal. */
	static const struct intr_info ulprx_intr2_info = {
		.name = "ULP_RX_INT_CAUSE_2",
		.cause_reg = A_ULP_RX_INT_CAUSE_2,
		.enable_reg = A_ULP_RX_INT_ENABLE_2,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);

	return (fatal);
}

/*
 * ULP TX interrupt handler.
 */
static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulptx_intr_details[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
		{ 0x0fffffff, "ULPTX parity error" },
		{ 0 }
	};
	static const struct intr_info ulptx_intr_info = {
		.name = "ULP_TX_INT_CAUSE",
		.cause_reg = A_ULP_TX_INT_CAUSE,
		.enable_reg = A_ULP_TX_INT_ENABLE,
		.fatal = 0x0fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulptx_intr_details,
		.actions = NULL,
	};
	/* Second cause register; only bits 0xf0 are treated as fatal. */
	static const struct intr_info ulptx_intr2_info = {
		.name = "ULP_TX_INT_CAUSE_2",
		.cause_reg = A_ULP_TX_INT_CAUSE_2,
		.enable_reg = A_ULP_TX_INT_ENABLE_2,
		.fatal = 0xf0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);

	return (fatal);
}

/*
 * Dump the PM_TX debug stat registers; used as an intr_action callback
 * by pmtx_intr_handler.  Always returns false (reporting only, never
 * fatal by itself).
 */
static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
{
	int i;
	u32 data[17];

	t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
	    ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
	for (i = 0; i < ARRAY_SIZE(data); i++) {
		CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
		    A_PM_TX_DBG_STAT0 + i, data[i]);
	}

	return (false);
}

/*
 * PM TX interrupt handler.
 */
static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Any cause bit triggers a dump of the PM_TX debug stats. */
	static const struct intr_action pmtx_intr_actions[] = {
		{ 0xffffffff, 0, pmtx_dump_dbg_stats },
		{ 0 },
	};
	static const struct intr_details pmtx_intr_details[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
		{ 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
		{ 0x00f00000, "PMTX icspi FIFO Rx framing error" },
		{ 0x000f0000, "PMTX icspi FIFO Tx framing error" },
		{ 0x0000f000, "PMTX oespi FIFO Rx framing error" },
		{ 0x00000f00, "PMTX oespi FIFO Tx framing error" },
		{ 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
		{ 0 }
	};
	static const struct intr_info pmtx_intr_info = {
		.name = "PM_TX_INT_CAUSE",
		.cause_reg = A_PM_TX_INT_CAUSE,
		.enable_reg = A_PM_TX_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = pmtx_intr_details,
		.actions = pmtx_intr_actions,
	};

	return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
}

/*
 * PM RX interrupt handler.
 */
static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details pmrx_intr_details[] = {
		/* T6+ */
		{ 0x18000000, "PMRX ospi overflow" },
		{ F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
		{ F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
		{ F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
		{ F_SDC_ERR, "PMRX SDC error" },

		/* T4+ */
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
		{ 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
		{ 0x0003c000, "PMRX iespi Rx framing error" },
		{ 0x00003c00, "PMRX iespi Tx framing error" },
		{ 0x00000300, "PMRX ocspi Rx framing error" },
		{ 0x000000c0, "PMRX ocspi Tx framing error" },
		{ 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
		{ 0 }
	};
	static const struct intr_info pmrx_intr_info = {
		.name = "PM_RX_INT_CAUSE",
		.cause_reg = A_PM_RX_INT_CAUSE,
		.enable_reg = A_PM_RX_INT_ENABLE,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = pmrx_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
}

/*
 * CPL switch interrupt handler.
 */
static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details cplsw_intr_details[] = {
		/* T5+ */
		{ F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
		{ F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },

		/* T4+ */
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
		{ 0 }
	};
	static const struct intr_info cplsw_intr_info = {
		.name = "CPL_INTR_CAUSE",
		.cause_reg = A_CPL_INTR_CAUSE,
		.enable_reg = A_CPL_INTR_ENABLE,
		.fatal = 0xff,
		.flags = NONFATAL_IF_DISABLED,
		.details = cplsw_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
}

/* Fatal-cause masks for the LE (Lookup Engine) per chip generation. */
#define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
#define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR)
#define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
    F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
    F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
    F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
#define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
    F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
    F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)

/*
 * LE interrupt handler.
 */
static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* LE_DB_INT_CAUSE decode for T4/T5. */
	static const struct intr_details le_intr_details[] = {
		{ F_REQQPARERR, "LE request queue parity error" },
		{ F_UNKNOWNCMD, "LE unknown command" },
		{ F_ACTRGNFULL, "LE active region full" },
		{ F_PARITYERR, "LE parity error" },
		{ F_LIPMISS, "LE LIP miss" },
		{ F_LIP0, "LE 0 LIP error" },
		{ 0 }
	};
	/* LE_DB_INT_CAUSE decode for T6+ (different bit layout). */
	static const struct intr_details t6_le_intr_details[] = {
		{ F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
		{ F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
		{ F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
		{ F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
		{ F_TOTCNTERR, "LE total active < TCAM count" },
		{ F_CMDPRSRINTERR, "LE internal error in parser" },
		{ F_CMDTIDERR, "Incorrect tid in LE command" },
		{ F_T6_ACTRGNFULL, "LE active region full" },
		{ F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
		{ F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
		{ F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
		{ F_TCAMACCFAIL, "LE TCAM access failure" },
		{ F_T6_UNKNOWNCMD, "LE unknown command" },
		{ F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
		{ F_T6_LIPMISS, "LE CLIP lookup miss" },
		{ T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
		{ 0 }
	};
	/* Not const: .details and .fatal are selected per-chip below. */
	struct intr_info le_intr_info = {
		.name = "LE_DB_INT_CAUSE",
		.cause_reg = A_LE_DB_INT_CAUSE,
		.enable_reg = A_LE_DB_INT_ENABLE,
		.fatal = 0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};

	if (chip_id(adap) <= CHELSIO_T5) {
		le_intr_info.details = le_intr_details;
		le_intr_info.fatal = T5_LE_FATAL_MASK;
	} else {
		le_intr_info.details = t6_le_intr_details;
		le_intr_info.fatal = T6_LE_FATAL_MASK;
	}

	return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
}

/*
 * MPS interrupt handler.
 */
static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details mps_rx_perr_intr_details[] = {
		{ 0xffffffff, "MPS Rx parity error" },
		{ 0 }
	};
	static const struct intr_info mps_rx_perr_intr_info = {
		.name = "MPS_RX_PERR_INT_CAUSE",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_rx_perr_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_tx_intr_details[] = {
		{ F_PORTERR, "MPS Tx destination port is disabled" },
		{ F_FRMERR, "MPS Tx framing error" },
		{ F_SECNTERR, "MPS Tx SOP/EOP error" },
		{ F_BUBBLE, "MPS Tx underflow" },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info = {
		.name = "MPS_TX_INT_CAUSE",
		.cause_reg = A_MPS_TX_INT_CAUSE,
		.enable_reg = A_MPS_TX_INT_ENABLE,
		.fatal = 0x1ffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_trc_intr_details[] = {
		{ F_MISCPERR, "MPS TRC misc parity error" },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info = {
		.name = "MPS_TRC_INT_CAUSE",
		.cause_reg = A_MPS_TRC_INT_CAUSE,
		.enable_reg = A_MPS_TRC_INT_ENABLE,
		.fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
		.flags = 0,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_sram_intr_details[] = {
		{ 0xffffffff, "MPS statistics SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_sram_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_tx_intr_details[] = {
		{ 0xffffff, "MPS statistics Tx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
		.fatal = 0xffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_rx_intr_details[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
		.fatal = 0xffffff,
		.flags = 0,
		.details = mps_stat_rx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_cls_intr_details[] = {
		{ F_HASHSRAM, "MPS hash SRAM parity error" },
		{ F_MATCHTCAM, "MPS match TCAM parity error" },
		{ F_MATCHSRAM, "MPS match SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info = {
		.name = "MPS_CLS_INT_CAUSE",
		.cause_reg = A_MPS_CLS_INT_CAUSE,
		.enable_reg = A_MPS_CLS_INT_ENABLE,
		.fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
		.flags = 0,
		.details = mps_cls_intr_details,
		.actions = NULL,
	};
	/* SRAM1 register exists on T5+ only (see chip_id check below). */
	static const struct intr_details mps_stat_sram1_intr_details[] = {
		{ 0xff, "MPS statistics SRAM1 parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram1_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
		.fatal = 0xff,
		.flags = 0,
		.details = mps_stat_sram1_intr_details,
		.actions = NULL,
	};
	bool fatal;

	fatal = false;
	fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap,
	    &mps_tx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
	if (chip_id(adap) > CHELSIO_T4) {
		fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
		    verbose);
	}

	/* Clear the summary cause register (different semantics on T4). */
	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	t4_read_reg(adap, A_MPS_INT_CAUSE);	/* flush */

	return (fatal);
}

/*
 * EDC/MC interrupt handler.
 */
static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
{
	/* Indexed by idx (MEM_EDC0/MEM_EDC1/MEM_MC0/MEM_MC1). */
	static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
	unsigned int count_reg, v;
	static const struct intr_details mem_intr_details[] = {
		{ F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" },
		{ F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" },
		{ F_PERR_INT_CAUSE, "FIFO parity error" },
		{ 0 }
	};
	/* Registers are filled in per-idx by the switch below. */
	struct intr_info ii = {
		.fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
		.details = mem_intr_details,
		.flags = 0,
		.actions = NULL,
	};
	bool fatal;

	/*
	 * NOTE(review): no default case; callers are expected to pass only
	 * the four MEM_* values — count_reg would be uninitialized otherwise.
	 */
	switch (idx) {
	case MEM_EDC0:
		ii.name = "EDC0_INT_CAUSE";
		ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0);
		ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0);
		count_reg = EDC_REG(A_EDC_ECC_STATUS, 0);
		break;
	case MEM_EDC1:
		ii.name = "EDC1_INT_CAUSE";
		ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1);
		ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1);
		count_reg = EDC_REG(A_EDC_ECC_STATUS, 1);
		break;
	case MEM_MC0:
		ii.name = "MC0_INT_CAUSE";
		if (is_t4(adap)) {
			ii.cause_reg = A_MC_INT_CAUSE;
			ii.enable_reg = A_MC_INT_ENABLE;
			count_reg = A_MC_ECC_STATUS;
		} else {
			ii.cause_reg = A_MC_P_INT_CAUSE;
			ii.enable_reg = A_MC_P_INT_ENABLE;
			count_reg = A_MC_P_ECC_STATUS;
		}
		break;
	case MEM_MC1:
		ii.name = "MC1_INT_CAUSE";
		ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1);
		ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1);
		count_reg = MC_REG(A_MC_P_ECC_STATUS, 1);
		break;
	}

	fatal = t4_handle_intr(adap, &ii, 0, verbose);

	/* Report and reset the ECC error counters. */
	v = t4_read_reg(adap, count_reg);
	if (v != 0) {
		if (G_ECC_UECNT(v) != 0) {
			CH_ALERT(adap,
			    "%s: %u uncorrectable ECC data error(s)\n",
			    name[idx], G_ECC_UECNT(v));
		}
		if (G_ECC_CECNT(v) != 0) {
			if (idx <= MEM_EDC1)
				t4_edc_err_read(adap, idx);
			CH_WARN_RATELIMIT(adap,
			    "%s: %u correctable ECC data error(s)\n",
			    name[idx], G_ECC_CECNT(v));
		}
		t4_write_reg(adap, count_reg, 0xffffffff);
	}

	return (fatal);
}

/*
 * Report and clear an MA address wrap-around; used as an intr_action
 * callback by ma_intr_handler.  Always returns false (reporting only).
 */
static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
{
	u32 v;

	v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
	CH_ALERT(adap,
	    "MA address wrap-around error by client %u to address %#x\n",
	    G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
	t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);

	return (false);
}

/*
 * MA interrupt handler.
 */
static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_action ma_intr_actions[] = {
		{ F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
		{ 0 },
	};
	static const struct intr_info ma_intr_info = {
		.name = "MA_INT_CAUSE",
		.cause_reg = A_MA_INT_CAUSE,
		.enable_reg = A_MA_INT_ENABLE,
		.fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = ma_intr_actions,
	};
	static const struct intr_info ma_perr_status1 = {
		.name = "MA_PARITY_ERROR_STATUS1",
		.cause_reg = A_MA_PARITY_ERROR_STATUS1,
		.enable_reg = A_MA_PARITY_ERROR_ENABLE1,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	/* STATUS2 exists on T5+ only (see chip_id check below). */
	static const struct intr_info ma_perr_status2 = {
		.name = "MA_PARITY_ERROR_STATUS2",
		.cause_reg = A_MA_PARITY_ERROR_STATUS2,
		.enable_reg = A_MA_PARITY_ERROR_ENABLE2,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal;

	fatal = false;
	fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
	if (chip_id(adap) > CHELSIO_T4)
		fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);

	return (fatal);
}

/*
 * SMB interrupt
 * handler.
 */
static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details smb_intr_details[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info smb_intr_info = {
		.name = "SMB_INT_CAUSE",
		.cause_reg = A_SMB_INT_CAUSE,
		.enable_reg = A_SMB_INT_ENABLE,
		.fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
		.flags = 0,
		.details = smb_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
}

/*
 * NC-SI interrupt handler.
 */
static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ncsi_intr_details[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info ncsi_intr_info = {
		.name = "NCSI_INT_CAUSE",
		.cause_reg = A_NCSI_INT_CAUSE,
		.enable_reg = A_NCSI_INT_ENABLE,
		.fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
		    F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
		.flags = 0,
		.details = ncsi_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
}

/*
 * MAC interrupt handler.
 */
static bool
mac_intr_handler(struct adapter *adap, int port, bool verbose)
{
	static const struct intr_details mac_intr_details[] = {
		{ F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
		{ F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
		{ 0 }
	};
	char name[32];
	struct intr_info ii;
	bool fatal = false;

	/* The MAC register block moved and was renamed after T4. */
	if (is_t4(adap)) {
		snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
		ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	} else {
		snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	}
	fatal |= t4_handle_intr(adap, &ii, 0, verbose);

	/* T5+ have an additional per-port parity-error cause register. */
	if (chip_id(adap) >= CHELSIO_T5) {
		snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
		ii.fatal = 0;
		ii.flags = 0;
		ii.details = NULL;
		ii.actions = NULL;
		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
	}

	/* T6+ add a 100G variant of the parity-error cause register. */
	if (chip_id(adap) >= CHELSIO_T6) {
		snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G",
		    port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
		ii.fatal = 0;
		ii.flags = 0;
		ii.details = NULL;
		ii.actions = NULL;
		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
	}

	return (fatal);
}

/*
 * PL Interrupt handler (PL_PL_INT_CAUSE within the PL module itself).
 */
static bool
plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details plpl_intr_details[] = {
		{ F_FATALPERR, "Fatal parity error" },
		{ F_PERRVFID, "VFID_MAP parity error" },
		{ 0 }
	};
	static const struct intr_info plpl_intr_info = {
		.name = "PL_PL_INT_CAUSE",
		.cause_reg = A_PL_PL_INT_CAUSE,
		.enable_reg = A_PL_PL_INT_ENABLE,
		.fatal = F_FATALPERR | F_PERRVFID,
		.flags = NONFATAL_IF_DISABLED,
		.details = plpl_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
}

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adap: the adapter
 *	@verbose: increased verbosity, for debug
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 */
int
t4_slow_intr_handler(struct adapter *adap, bool verbose)
{
	static const struct intr_details pl_intr_details[] = {
		{ F_MC1, "MC1" },
		{ F_UART, "UART" },
		{ F_ULP_TX, "ULP TX" },
		{ F_SGE, "SGE" },
		{ F_HMA, "HMA" },
		{ F_CPL_SWITCH, "CPL Switch" },
		{ F_ULP_RX, "ULP RX" },
		{ F_PM_RX, "PM RX" },
		{ F_PM_TX, "PM TX" },
		{ F_MA, "MA" },
		{ F_TP, "TP" },
		{ F_LE, "LE" },
		{ F_EDC1, "EDC1" },
		{ F_EDC0, "EDC0" },
		{ F_MC, "MC0" },
		{ F_PCIE, "PCIE" },
		{ F_PMU, "PMU" },
		{ F_MAC3, "MAC3" },
		{ F_MAC2, "MAC2" },
		{ F_MAC1, "MAC1" },
		{ F_MAC0, "MAC0" },
		{ F_SMB, "SMB" },
		{ F_SF, "SF" },
		{ F_PL, "PL" },
		{ F_NCSI, "NC-SI" },
		{ F_MPS, "MPS" },
		{ F_MI, "MI" },
		{ F_DBG, "DBG" },
		{ F_I2CM, "I2CM" },
		{ F_CIM, "CIM" },
		{ 0 }
	};
	static const struct intr_info pl_perr_cause = {
		.name = "PL_PERR_CAUSE",
		.cause_reg = A_PL_PERR_CAUSE,
		.enable_reg = A_PL_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = pl_intr_details,
		.actions = NULL,
	};
	/* Per-module dispatch table; arg is the memory/port index or -1. */
	static const struct intr_action pl_intr_action[] = {
		{ F_MC1, MEM_MC1, mem_intr_handler },
		{ F_ULP_TX, -1, ulptx_intr_handler },
		{ F_SGE, -1, sge_intr_handler },
		{ F_CPL_SWITCH, -1, cplsw_intr_handler },
		{ F_ULP_RX, -1, ulprx_intr_handler },
		{ F_PM_RX, -1, pmrx_intr_handler},
		{ F_PM_TX, -1, pmtx_intr_handler},
		{ F_MA, -1, ma_intr_handler },
		{ F_TP, -1, tp_intr_handler },
		{ F_LE, -1, le_intr_handler },
		{ F_EDC1, MEM_EDC1, mem_intr_handler },
		{ F_EDC0, MEM_EDC0, mem_intr_handler },
		{ F_MC0, MEM_MC0, mem_intr_handler },
		{ F_PCIE, -1, pcie_intr_handler },
		{ F_MAC3, 3, mac_intr_handler},
		{ F_MAC2, 2, mac_intr_handler},
		{ F_MAC1, 1, mac_intr_handler},
		{ F_MAC0, 0, mac_intr_handler},
		{ F_SMB, -1, smb_intr_handler},
		{ F_PL, -1, plpl_intr_handler },
		{ F_NCSI, -1, ncsi_intr_handler},
		{ F_MPS, -1, mps_intr_handler },
		{ F_CIM, -1, cim_intr_handler },
		{ 0 }
	};
	static const struct intr_info pl_intr_info = {
		.name = "PL_INT_CAUSE",
		.cause_reg = A_PL_INT_CAUSE,
		.enable_reg = A_PL_INT_ENABLE,
		.fatal = 0,
		.flags = 0,
		.details = pl_intr_details,
		.actions = pl_intr_action,
	};
	bool fatal;
	u32 perr;

	perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
	if (verbose || perr != 0) {
		t4_show_intr_info(adap, &pl_perr_cause, perr);
		if (perr != 0)
			t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
		/* In verbose mode also walk modules that are merely enabled. */
		if (verbose)
			perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
	}
	fatal = t4_handle_intr(adap, &pl_intr_info, perr, verbose);
	if (fatal)
		t4_fatal_err(adap, false);

	return (0);
}

#define PF_INTR_MASK (F_PFSW | F_PFCIM)

/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void
t4_intr_enable(struct adapter *adap)
{
	u32 val = 0;

	/* Chip-specific SGE error interrupts. */
	if (chip_id(adap) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
	    F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
	    F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
	    F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
	    F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
	    F_EGRESS_SIZE_ERR;
	t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* SF and I2CM interrupts are left disabled. */
	t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
	/* Route global interrupts to this PF. */
	t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
}

/**
 *	t4_intr_disable - disable interrupts
 *	@adap: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrators.  The caller must be a PCI function managing global
 *	interrupts.
 */
void
t4_intr_disable(struct adapter *adap)
{
	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
}

/**
 *	t4_intr_clear - clear all interrupts
 *	@adap: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.  The caller must be a PCI function managing
 *	global interrupts.
 */
void
t4_intr_clear(struct adapter *adap)
{
	/* Cause registers common to all supported chips. */
	static const u32 cause_reg[] = {
		A_CIM_HOST_INT_CAUSE,
		A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_CPL_INTR_CAUSE,
		EDC_REG(A_EDC_INT_CAUSE, 0), EDC_REG(A_EDC_INT_CAUSE, 1),
		A_LE_DB_INT_CAUSE,
		A_MA_INT_WRAP_STATUS,
		A_MA_PARITY_ERROR_STATUS1,
		A_MA_INT_CAUSE,
		A_MPS_CLS_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		A_MPS_TRC_INT_CAUSE,
		A_MPS_TX_INT_CAUSE,
		A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		A_NCSI_INT_CAUSE,
		A_PCIE_INT_CAUSE,
		A_PCIE_NONFAT_ERR,
		A_PL_PL_INT_CAUSE,
		A_PM_RX_INT_CAUSE,
		A_PM_TX_INT_CAUSE,
		A_SGE_INT_CAUSE1,
		A_SGE_INT_CAUSE2,
		A_SGE_INT_CAUSE3,
		A_SGE_INT_CAUSE4,
		A_SMB_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE,
		A_ULP_RX_INT_CAUSE_2,
		A_ULP_TX_INT_CAUSE,
		A_ULP_TX_INT_CAUSE_2,
		MYPF_REG(A_PL_PF_INT_CAUSE),
	};
	int i;
	const int nchan = adap->chip_params->nchan;

	for (i = 0; i < ARRAY_SIZE(cause_reg); i++)
		t4_write_reg(adap, cause_reg[i], 0xffffffff);

	/* T4-only registers. */
	if (is_t4(adap)) {
		t4_write_reg(adap, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		    0xffffffff);
		t4_write_reg(adap, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		    0xffffffff);
		t4_write_reg(adap, A_MC_INT_CAUSE, 0xffffffff);
		for (i = 0; i < nchan; i++) {
			t4_write_reg(adap, PORT_REG(i, A_XGMAC_PORT_INT_CAUSE),
			    0xffffffff);
		}
	}
	/* T5 and later. */
	if (chip_id(adap) >= CHELSIO_T5) {
		t4_write_reg(adap, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
		t4_write_reg(adap, A_MPS_STAT_PERR_INT_CAUSE_SRAM1, 0xffffffff);
		t4_write_reg(adap, A_SGE_INT_CAUSE5, 0xffffffff);
		t4_write_reg(adap, A_MC_P_INT_CAUSE, 0xffffffff);
		if (is_t5(adap)) {
			t4_write_reg(adap, MC_REG(A_MC_P_INT_CAUSE, 1),
			    0xffffffff);
		}
		for (i = 0; i < nchan; i++) {
			t4_write_reg(adap, T5_PORT_REG(i,
			    A_MAC_PORT_PERR_INT_CAUSE), 0xffffffff);
			if (chip_id(adap) > CHELSIO_T5) {
				t4_write_reg(adap, T5_PORT_REG(i,
				    A_MAC_PORT_PERR_INT_CAUSE_100G),
				    0xffffffff);
			}
			t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_INT_CAUSE),
			    0xffffffff);
		}
	}
	if (chip_id(adap) >= CHELSIO_T6) {
		t4_write_reg(adap, A_SGE_INT_CAUSE6, 0xffffffff);
	}

	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	t4_write_reg(adap, A_PL_PERR_CAUSE, 0xffffffff);
	t4_write_reg(adap, A_PL_INT_CAUSE, 0xffffffff);
	(void) t4_read_reg(adap, A_PL_INT_CAUSE);          /* flush */
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.  Returns a 6-bit hash value.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
		      int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;	/* wrap around @rspq */
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags
 *
 *	Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
		    cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_keymode =
		    cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;	/* only MANUAL and BASICVIRTUAL supported */
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_config_vi_rss - configure per VI RSS settings
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: the VI id
 *	@flags: RSS flags
 *	@defq: id of the default RSS queue for the VI.
 *	@skeyidx: RSS secret key table index for non-global mode
 *	@skey: RSS vf_scramble key for VI.
 *
 *	Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
		     unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 *	t4_read_rss - read the contents of the RSS mapping table
 *	@adapter: the adapter
 *	@map: holds the contents of the RSS mapping table
 *
 *	Reads the contents of the RSS hash->queue mapping table.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret;
	int rss_nentries = adapter->chip_params->rss_nentries;

	/* Each table row holds two queue IDs. */
	for (i = 0; i < rss_nentries / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 * @adap: the adapter
 * @cmd: TP fw ldst address space type
 * @vals: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: Read (1) or Write (0)
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Access TP indirect registers through LDST
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	/* One firmware LDST command per register. */
	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}

/**
 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 * @adap: the adapter
 * @reg_addr: Address Register
 * @reg_data: Data register
 * @buff: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: READ(1) or WRITE(0)
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read/Write TP indirect registers through LDST if possible.
 * Else, use backdoor access
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;
	int cmd;

	/* Map the address register to its LDST address space. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* Fall back to direct indirect-register access on LDST failure. */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}

/**
 * t4_tp_pio_read - Read TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP PIO Registers
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 * t4_tp_pio_write - Write TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are stored
 * @nregs: how many indirect registers to write
 * @start_index: index of first indirect register to write
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Write TP PIO Registers
 **/
void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
}

/**
 * t4_tp_tm_pio_read - Read TP TM PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect
 register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP TM PIO Registers
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}

/**
 * t4_tp_mib_read - Read TP MIB registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP MIB Registers
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_read_rss_key - read the global RSS key
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the global 320-bit RSS key.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}

/**
 *	t4_write_rss_key - program one of the RSS keys
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@idx: which RSS key to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
 *	0..15 the corresponding entry in the RSS key table is written,
 *	otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
	}
}

/**
 *	t4_read_rss_pf_config - read PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to read
 *	@valp: where to store the returned value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Configuration Table at the specified index and returns
 *	the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}

/**
 *	t4_write_rss_pf_config - write PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@val: the value to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the PF RSS Configuration Table at the specified index with the
 *	specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}

/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* The VFWRADDR field moved between T5 and T6. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
		 data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
}

/**
 *	t4_write_rss_vf_config - write VF RSS Configuration Table
 *
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to write
 *	@vfl: the VFL to store
 *	@vfh: the VFH to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the VF RSS Configuration Table at the specified index with the
 *	specified (VFL, VFH) values.
 */
void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
			    u32 vfl, u32 vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* The VFWRADDR field moved between T5 and T6. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}

	/*
	 * Load up VFL/VFH with the values to be written ...
	 */
	t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);

	/*
	 * Write the VFL/VFH into the VF Table at index'th location.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
}

/**
 *	t4_read_rss_pf_map - read PF RSS Map
 *	@adapter: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Map register and returns its value.
 */
u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmap;

	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);

	return pfmap;
}

/**
 *	t4_write_rss_pf_map - write PF RSS Map
 *	@adapter: the adapter
 *	@pfmap: PF RSS Map value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the specified value to the PF RSS Map register.
 */
void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
}

/**
 *	t4_read_rss_pf_mask - read PF RSS Mask
 *	@adapter: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Mask register and returns its value.
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmask;

	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);

	return pfmask;
}

/**
 *	t4_write_rss_pf_mask - write PF RSS Mask
 *	@adapter: the adapter
 *	@pfmask: PF RSS Mask value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the specified value to the PF RSS Mask register.
 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
}

/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

/* Index of MIB register x within val[], relative to TCP_OUT_RST. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
/* 64-bit counters are split across _HI/_LO register pairs. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 *	t4_tp_get_err_stats - read TP's error MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's error counters.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}

/**
 *	t4_tp_get_tnl_stats - read TP's tunnel MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's tunnel counters.
 */
void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->out_pkt, nchan, A_TP_MIB_TNL_OUT_PKT_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->in_pkt, nchan, A_TP_MIB_TNL_IN_PKT_0,
		       sleep_ok);
}

/**
 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's proxy counters.
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
    bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
}

/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}

/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}

/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	/* Byte count is split across a HI/LO register pair. */
	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}

/**
 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}

/**
 *	t4_tp_get_tid_stats - read TP's tid MIB counters.
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's counters for tids.
 */
void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st,
		      bool sleep_ok)
{
	t4_tp_mib_read(adap, &st->del, 4, A_TP_MIB_TID_DEL, sleep_ok);
}

/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* Select entry i without modifying it (index 0xff). */
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(0xff) |
			     V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

/**
 *	t4_read_cong_tbl - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
			incr[mtu][w] = (u16)t4_read_reg(adap,
						A_TP_CCTRL_TABLE) & 0x1fff;
		}
}

/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
*/ void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val) { t4_write_reg(adap, A_TP_PIO_ADDR, addr); val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; t4_write_reg(adap, A_TP_PIO_DATA, val); } /** * init_cong_ctrl - initialize congestion control parameters * @a: the alpha values for congestion control * @b: the beta values for congestion control * * Initialize the congestion control parameters. */ static void init_cong_ctrl(unsigned short *a, unsigned short *b) { a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; a[9] = 2; a[10] = 3; a[11] = 4; a[12] = 5; a[13] = 6; a[14] = 7; a[15] = 8; a[16] = 9; a[17] = 10; a[18] = 14; a[19] = 17; a[20] = 21; a[21] = 25; a[22] = 30; a[23] = 35; a[24] = 45; a[25] = 60; a[26] = 80; a[27] = 100; a[28] = 200; a[29] = 300; a[30] = 400; a[31] = 500; b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; b[9] = b[10] = 1; b[11] = b[12] = 2; b[13] = b[14] = b[15] = b[16] = 3; b[17] = b[18] = b[19] = b[20] = b[21] = 4; b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; b[28] = b[29] = 6; b[30] = b[31] = 7; } /* The minimum additive increment value for the congestion control table */ #define CC_MIN_INCR 2U /** * t4_load_mtus - write the MTU and congestion control HW tables * @adap: the adapter * @mtus: the values for the MTU table * @alpha: the values for the congestion control alpha parameter * @beta: the values for the congestion control beta parameter * * Write the HW MTU table with the supplied MTUs and the high-speed * congestion control table with the supplied alpha, beta, and MTUs. * We write the two tables together because the additive increments * depend on the MTUs. 
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packet counts used to scale the additive increments. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448,
		640, 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336,
		20480, 28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * mtu - 40: presumably the payload after standard
			 * TCP/IP headers — TODO(review) confirm.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 *	t4_set_pace_tbl - set the pace table
 *	@adap: the adapter
 *	@pace_vals: the pace values in microseconds
 *	@start: index of the first entry in the HW pace table to set
 *	@n: how many entries to set
 *
 *	Sets (a subset of the) HW pace table.  Returns 0 on success or
 *	-ERANGE when a value cannot be represented in the table.
 */
int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
		    unsigned int start, unsigned int n)
{
	unsigned int vals[NTX_SCHED], i;
	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);

	if (n > NTX_SCHED)
		return -ERANGE;

	/* convert values from us to dack ticks, rounding to closest value */
	for (i = 0; i < n; i++, pace_vals++) {
		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
		if (vals[i] > 0x7ff)
			return -ERANGE;
		/* A non-zero request must not collapse to zero ticks. */
		if (*pace_vals && vals[i] == 0)
			return -ERANGE;
	}
	for (i = 0; i < n; i++, start++)
		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
	return 0;
}

/**
 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a Tx HW scheduler for the target rate.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;     /* -> bytes */
		/*
		 * Search for the clocks-per-tick / bytes-per-tick pair
		 * whose product comes closest to the requested rate.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}

	/* Each TM PIO word holds the settings for a pair of schedulers. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}

/**
 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Set the interpacket delay for a HW packet rate scheduler.
 */
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
{
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;
	if (ipg > M_TXTIMERSEPQ0)
		return -EINVAL;

	/* Each register holds the timer separators for a queue pair. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
	else
		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	/* Read back to flush the posted write. */
	t4_read_reg(adap, A_TP_TM_PIO_DATA);
	return 0;
}

/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.
 The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 * which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	u64 v = (u64)bytes256 * adap->params.vpd.cclk;

	/* 62.5 * v computed in integer arithmetic. */
	return v * 62 + v / 2;
}

/**
 *	t4_get_chan_txrate - get the current per channel Tx rates
 *	@adap: the adapter
 *	@nic_rate: rates for NIC traffic
 *	@ofld_rate: rates for offloaded traffic
 *
 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
 *	for each channel.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	/* Channels 2 and 3 exist only on 4-channel chips. */
	if (adap->chip_params->nchan > 2) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->chip_params->nchan > 2) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}

/**
 *	t4_set_trace_filter - configure one of the tracing filters
 *	@adap: the adapter
 *	@tp: the desired trace filter parameters
 *	@idx: which filter to configure
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.  If @tp is %NULL
 *	it indicates that the filter is already written in the register and it
 *	just needs to be enabled or disabled.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
			int idx, int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;
	/* NOTE(review): multitrc is computed below but never used. */
	u32 multitrc = F_TRCMULTIFILTER;
	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;

	if (idx < 0 || idx >= NTRACE)
		return -EINVAL;

	if (tp == NULL || !enable) {
		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
		    enable ? en : 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	} else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		multitrc = 0;
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);

	/* Convert the filter index into match/don't-care register offsets. */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		/* HW stores "don't care" bits, i.e. the inverted mask. */
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
		     (is_t4(adap) ?
		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}

/**
 *	t4_get_trace_filter - query one of the tracing filters
 *	@adap: the adapter
 *	@tp: the current trace filter parameters
 *	@idx: which trace filter to query
 *	@enabled: non-zero if the filter is enabled
 *
 *	Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp,
			 int idx, int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* T4 and T5+ lay out the enable/port/invert fields differently. */
	if (is_t4(adap)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		/* HW keeps "don't care" bits; invert back into a mask. */
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}

/**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMTX.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select stat i (1-based in the config register). */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/* T5+: cycle counts come via the debug interface. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_pmrx_get_stats - returns the HW stats from PMRX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMRX.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select stat i (1-based in the config register). */
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* T5+: cycle counts come via the debug interface. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given port.  Bit i is set if buffer group i is used by the
 *	port.
 */
static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n;

	/* Prefer the firmware-provided map when one is available. */
	if (adap->params.mps_bg_map)
		return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);

	/* Otherwise derive the map from the number of ports. */
	n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/*
 * TP RX e-channels associated with the port.
 */
static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
{
	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
	const u32 all_chan = (1 << adap->chip_params->nchan) - 1;

	/* Same port-count based layout as the MPS buffer group map. */
	if (n == 0)
		return idx == 0 ? all_chan : 0;
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/*
 * TP RX c-channel associated with the port.
 */
static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
{
	u32 param, val;
	int ret;

	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (!ret)
		return (val >> (8 * idx)) & 0xff;

	/* A failed query is treated as "no c-channel mapping". */
	return 0;
}

/**
 *	t4_get_port_type_description - return Port Type string description
 *	@port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed by the firmware's enum fw_port_type values. */
	static const char *const port_type_description[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 *	t4_get_port_stats_offset - collect port stats relative to a
 *				   previous snapshot
 *	@adap: The adapter
 *	@idx: The port
 *	@stats: Current stats to fill
 *	@offset: Previous stats snapshot
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
		struct port_stats *stats,
		struct port_stats *offset)
{
	u64 *s, *o;
	int i;

	t4_get_port_stats(adap, idx, stats);
	/* Subtract the snapshot field-by-field (struct is all u64's). */
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
			i < (sizeof(struct port_stats)/sizeof(u64)) ;
			i++, s++, o++)
		*s -= *o;
}

/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	struct port_info *pi = adap->port[idx];
	u32 bgmap = pi->mps_bg_map;
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/* Per-port stats live at different register bases on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * T5+ can be configured to include pause frames in the frame /
	 * byte / mcast counters; back them out so the counts are
	 * comparable to T4 (pause frames are 64 bytes each).
	 */
	if (chip_id(adap) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* FCS error counter register varies by port/chip; -1 means none. */
	if (pi->fcs_reg != -1)
		p->rx_fcs_err = t4_read_reg64(adap, pi->fcs_reg) -
		    pi->fcs_base;

	if (chip_id(adap) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Buffer-group drop/truncate counts, only for groups this port owns. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
/* Loopback stats also sit at different register bases on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets	= GET_STAT(BYTES);
	p->frames	= GET_STAT(FRAMES);
	p->bcast_frames	= GET_STAT(BCAST);
	p->mcast_frames	= GET_STAT(MCAST);
	p->ucast_frames	= GET_STAT(UCAST);
	p->error_frames	= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/* Buffer-group counters apply only to real (non-loopback) ports. */
	if (idx < adap->params.nports) {
		u32 bg = adap2pinfo(adap, idx)->mps_bg_map;

		p->ovflow0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
		p->ovflow1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
		p->ovflow2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
		p->ovflow3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
		p->trunc0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
		p->trunc1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
		p->trunc2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
		p->trunc3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
	}

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_wol_magic_enable - enable/disable magic packet WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@addr: MAC address expected in magic packets, %NULL to disable
 *
 *	Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* The WoL registers moved between T4 and T5+. */
	if (is_t4(adap)) {
		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	}

	if (addr) {
		/* Low register holds the last four MAC bytes, high the first two. */
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
			 V_MAGICEN(addr != NULL));
}

/**
 *	t4_wol_pat_enable - enable/disable pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc.  If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port,
		      unsigned int map, u64 mask0, u64 mask1,
		      unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	if (map > 0xff)
		return -EINVAL;

#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1-3 are common to all filters; DATA0 is set per filter below. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC; CRC entries start at EPIO address 32. */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}

/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY suppresses the delete notification when qid < 0. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
		    cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/* Fill in the common header of a firmware command structure. */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

/**
 *	t4_fwaddrspace_write - write to an address in the firmware's
 *			       address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues an LDST command to write @val at @addr in the firmware
 *	address space.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	/* The 16-bit register value comes back in the command reply. */
	if (ret == 0)
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}

/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@valp: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_sge_decode_idma_state - decode the idma state
 *	@adapter: the adapter
 *	@state: the state idma is stuck in
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* State-name tables, indexed by the raw IDMA state value. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* SGE debug registers dumped alongside the state for diagnosis. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
		return;
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

/**
 *	t4_sge_ctxt_flush - flush the SGE context cache
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@ctxt_type: which context cache to flush (egress or ingress)
 *
 *	Issues a FW command through the given mailbox to flush the
 *	SGE context cache.
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
						 FW_LDST_ADDRSPC_SGE_EGRC :
						 FW_LDST_ADDRSPC_SGE_INGC);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return ret;
}

/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 *	t4_fw_bye - end communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_reset - issue a reset to FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: specifies the type of reset to perform
 *
 *	Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	/* @reset is passed straight through as the PL_RST value. */
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	M_PCIE_FW_MASTER).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command (currently unused here)
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox)
{
	int ms;

	/* Take the uP out of RESET, then wait for FW to clear its HALT flag. */
	t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
		if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
			return FW_SUCCESS;
		msleep(100);
		ms += 100;
	}

	return -ETIMEDOUT;
}

/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* Bootstrap images are loaded without halting/restarting the uP. */
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			return ret;
	}

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0 || bootstrap)
		return ret;

	return t4_fw_restart(adap, mbox);
}

/**
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_query_params_rw - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@rw: Write and read flag
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw)
{
	int i, ret;
	struct fw_params_cmd c;
	/* Walks the (mnem, val) pairs in the command payload. */
	__be32 *p = &c.param[0].mnem;

	/* The command only has room for 7 (mnem, val) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		/* When @rw is set, also send the current value alongside. */
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}

int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}

/**
 *	t4_set_params_timeout - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@timeout: the timeout time
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
{
	struct fw_params_cmd c;
	/* Walks the (mnem, val) pairs in the command payload. */
	__be32 *p = &c.param[0].mnem;

	/* The command only has room for 7 (mnem, val) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}

/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}

/**
 *	t4_cfg_pfvf - configure PF/VF resource limits
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF being configured
 *	@vf: the VF being configured
 *	@txq: the max number of egress queues
 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
 *	@rxqi: the max number of interrupt-capable ingress queues
 *	@rxq: the max number of interruptless ingress queues
 *	@tc: the PCI traffic class
 *	@vi: the max number of virtual interfaces
 *	@cmask: the channel access rights mask for the PF/VF
 *	@pmask: the port access rights mask for the PF/VF
 *	@nexact: the maximum number of exact MPS filters
 *	@rcaps: read capabilities
 *	@wxcaps: write/execute capabilities
 *
 *	Configures resource limits and capabilities for a physical or virtual
 *	function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
				  V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
				     V_FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
				    V_FW_PFVF_CMD_PMASK(pmask) |
				    V_FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
				      V_FW_PFVF_CMD_NVI(vi) |
				      V_FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
					   V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
					   V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_alloc_vi_func - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@portfunc: which Port Application Function MAC Address is desired
 *	@idstype: Intrusion Detection Type
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     uint8_t *vfvld, uint16_t *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;
	/* A NULL @rss_size means the caller wants no RSS slice for this VI. */
	if (!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;
	ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/*
		 * Deliberate fallthrough: copy out as many extra MAC
		 * addresses as were requested, from highest to lowest.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));

	/*
	 * With VIID/SMT extensions the VF-valid bit and VIN come from the
	 * command reply; otherwise they are decoded from the VIID itself.
	 */
	if (vfvld) {
		*vfvld = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIVLD(ret);
	}
	if (vin) {
		*vin = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIN(ret);
	}

	return ret;
}

/**
 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Backwards compatible and convenience routine to allocate a Virtual
 *	Interface with a Ethernet Port Application Function and Intrusion
 *	Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				vfvld, vin, FW_VI_FUNC_ETH, 0);
}

/**
 *	t4_free_vi - free a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@viid: virtual interface identifiler
 *
 *	Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
{
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
				  F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) |
				  V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}

/**
 *	t4_set_rxmode - set Rx properties of a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@mtu: the new MTU or -1
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/*
	 * Convert each "-1 means no change" argument to the corresponding
	 * all-ones firmware mask value, which FW interprets as "keep as-is".
	 */
	if (mtu < 0)
		mtu = M_FW_VI_RXMODE_CMD_MTU;
	if (promisc < 0)
		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	if (all_multi < 0)
		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	if (bcast < 0)
		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	if (vlanex < 0)
		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
					V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
					V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
					V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
					V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
 *	@adap: the adapter
 *	@viid: the VI id
 *	@mac: the MAC address
 *	@mask: the mask
 *	@vni: the VNI id for the tunnel protocol
 *	@vni_mask: mask for the VNI id
 *	@dip_hit: to enable DIP match for the MPS entry
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an MPS entry with specified MAC address and VNI value.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			    const u8 *addr, const u8 *mask, unsigned int vni,
			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
			    bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_vni *p = c.u.exact_vni;
	int ret = 0;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
	c.freemacs_to_len16 = cpu_to_be32(val);
	/* FW_VI_MAC_ADD_MAC asks FW to pick a free index for the entry. */
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));

	p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
					    V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
					    V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
	p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0)
		/* On success the reply carries the index FW assigned. */
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
	return ret;
}

/**
 *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@mac: the MAC address
 *	@mask: the mask
 *	@idx: index at which to add this entry
 *	@port_id: the port index
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@sleep_ok: call is allowed to sleep
 *
 *	Adds the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Specify the raw MPS TCAM index for this entry. */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/* Copy the address and the mask */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		/* FW must honor the requested index; ENOMEM otherwise. */
		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}

	return ret;
}

/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.
 If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in chunks of at most ARRAY_SIZE(c.u.exact). */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
					 rem : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
			    cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i],
			       sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
			    be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= max_naddr ?
						 0xffff : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only free existing filters on the first iteration. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Treat partial success (FW ran out of room) as success. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 *	t4_free_encap_mac_filt - frees MPS entry at given index
 *	@adap: the adapter
 *	@viid: the VI id
 *	@idx: index of MPS entry to be freed
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the MPS entry at supplied index
 *
 *	Returns a negative error number or zero on success
 */
int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
			   int idx, bool sleep_ok)
{
	struct fw_vi_mac_exact *p;
	struct fw_vi_mac_cmd c;
	u8 addr[] = {0,0,0,0,0,0};
	int ret = 0;
	u32 exact;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_CMD_EXEC(0) |
				   V_FW_VI_MAC_CMD_VIID(viid));
	exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					  exact |
					  V_FW_CMD_LEN16(1));
	p = c.u.exact;
	/* An all-zero address at the given index frees that entry. */
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	return ret;
}

/**
 *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index of the entry in mps tcam
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@port_id: the port index
 *	@sleep_ok: call is allowed to sleep
 *
 *	Removes the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number on failure.
 */
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
			 const u8 *addr, const u8 *mask, unsigned int idx,
			 u8 lookup_type, u8 port_id, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 raw;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_CMD_EXEC(0) |
				   V_FW_VI_MAC_CMD_VIID(viid));
	raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					  raw |
					  V_FW_CMD_LEN16(1));

	/* ID_BASED_FREE requests removal of the entry at @idx. */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
				     FW_VI_MAC_ID_BASED_FREE);

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/* Copy the address and the mask */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);

	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
}

/**
 *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the exact-match filter for each of the supplied addresses
 *
 *	Returns a negative error number or the number of filters freed.
 */
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
		     unsigned int viid, unsigned int naddr,
		     const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in chunks of at most ARRAY_SIZE(c.u.exact). */
	for (offset = 0; offset < (int)naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
					 rem : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_CMD_EXEC(0) |
				     V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 =
				cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					    V_FW_CMD_LEN16(len16));

		/* MAC_BASED_FREE asks FW to locate and free each address. */
		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i],
			       sizeof(p->macaddr));
		}

		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret)
			break;

		/* Count the entries the firmware actually freed. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
						be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}

/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: add MAC to SMT and return its index, or NULL
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* An out-of-range index means the TCAM is full. */
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
		if (smt_idx) {
			/*
			 * With SMT extensions FW returns the SMT index in the
			 * reply; otherwise derive it from the VIID (T4/T5 use
			 * two SMT entries per VIN, T6+ uses one).
			 */
			if (adap->params.viid_smt_extn_support)
				*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
			else {
				if (chip_id(adap) <= CHELSIO_T5)
					*smt_idx = (viid & M_FW_VIID_VIN) << 1;
				else
					*smt_idx = viid & M_FW_VIID_VIN;
			}
		}
	}
	return ret;
}

/**
 *	t4_set_addr_hash - program the MAC inexact-match hash filter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	u32 val;

	memset(&c, 0, sizeof(c));
	/*
	 * NOTE(review): V_FW_VI_ENABLE_CMD_VIID is used here although this is
	 * an FW_VI_MAC_CMD; the VIID appears to occupy the same bit range in
	 * both commands, but confirm against the fw interface definitions.
	 */
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
	c.freemacs_to_len16 = cpu_to_be32(val);
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_enable_vi_params - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *	@dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
				     FW_LEN16(c));
	/* Non-sleeping mailbox write; may be called in atomic context. */
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_enable_vi - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
*/ int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en) { return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); } /** * t4_identify_port - identify a VI's port by blinking its LED * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @nblinks: how many times to blink LED at 2.5 Hz * * Identifies a VI's port by blinking its LED. */ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int nblinks) { struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); c.blinkdur = cpu_to_be16(nblinks); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_iq_stop - stop an ingress queue and its FLs * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queues * @vf: the VF owning the queues * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) * @iqid: ingress queue id * @fl0id: FL0 queue id or 0xffff if no attached FL0 * @fl1id: FL1 queue id or 0xffff if no attached FL1 * * Stops an ingress queue and its associated FLs, if any. This causes * any current or future data/messages destined for these queues to be * tossed. 
*/
int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	/* IQSTOP (not FREE): the queue is quiesced but remains allocated. */
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_iq_free - free an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_eth_eq_stop - stop an Ethernet egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@eqid: egress queue id
 *
 *	Stops an Ethernet egress queue.
The queue can be reinitialized or
 *	freed but is not otherwise functional after this call.
 */
int t4_eth_eq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_ETH_CMD_PFN(pf) |
				  V_FW_EQ_ETH_CMD_VFN(vf));
	/* EQSTOP (not FREE): the queue is quiesced but remains allocated. */
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_EQSTOP | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_eth_eq_free - free an Ethernet egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_ETH_CMD_PFN(pf) |
				  V_FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ctrl_eq_free - free a control egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees a control egress queue.
*/
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_CTRL_CMD_PFN(pf) |
				  V_FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the firmware's Link Down Reason Code. */
	static const char *reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};

	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}

/*
 * Return the highest speed set in the port capabilities, in Mb/s.
*/
unsigned int fwcap_to_speed(uint32_t caps)
{
	/* Checks are ordered highest to lowest so the first hit wins. */
	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
		do { \
			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
				return __speed; \
		} while (0)

	TEST_SPEED_RETURN(400G, 400000);
	TEST_SPEED_RETURN(200G, 200000);
	TEST_SPEED_RETURN(100G, 100000);
	TEST_SPEED_RETURN(50G,   50000);
	TEST_SPEED_RETURN(40G,   40000);
	TEST_SPEED_RETURN(25G,   25000);
	TEST_SPEED_RETURN(10G,   10000);
	TEST_SPEED_RETURN(1G,     1000);
	TEST_SPEED_RETURN(100M,    100);

	#undef TEST_SPEED_RETURN

	return 0;
}

/*
 * Return the port capabilities bit for the given speed, which is in Mb/s.
 * Returns 0 if the speed doesn't map to any capability bit.
 */
uint32_t speed_to_fwcap(unsigned int speed)
{
	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
		do { \
			if (speed == __speed) \
				return FW_PORT_CAP32_SPEED_##__caps_speed; \
		} while (0)

	TEST_SPEED_RETURN(400G, 400000);
	TEST_SPEED_RETURN(200G, 200000);
	TEST_SPEED_RETURN(100G, 100000);
	TEST_SPEED_RETURN(50G,   50000);
	TEST_SPEED_RETURN(40G,   40000);
	TEST_SPEED_RETURN(25G,   25000);
	TEST_SPEED_RETURN(10G,   10000);
	TEST_SPEED_RETURN(1G,     1000);
	TEST_SPEED_RETURN(100M,    100);

	#undef TEST_SPEED_RETURN

	return 0;
}

/*
 * Return the port capabilities bit for the highest speed in the capabilities.
 */
uint32_t fwcap_top_speed(uint32_t caps)
{
	/* Checks are ordered highest to lowest so the first hit wins. */
	#define TEST_SPEED_RETURN(__caps_speed) \
		do { \
			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
				return FW_PORT_CAP32_SPEED_##__caps_speed; \
		} while (0)

	TEST_SPEED_RETURN(400G);
	TEST_SPEED_RETURN(200G);
	TEST_SPEED_RETURN(100G);
	TEST_SPEED_RETURN(50G);
	TEST_SPEED_RETURN(40G);
	TEST_SPEED_RETURN(25G);
	TEST_SPEED_RETURN(10G);
	TEST_SPEED_RETURN(1G);
	TEST_SPEED_RETURN(100M);

	#undef TEST_SPEED_RETURN

	return 0;
}

/**
 *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 *
 *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
 *	32-bit Port Capabilities value.
*/
static uint32_t lstatus_to_fwcap(u32 lstatus)
{
	uint32_t linkattr = 0;

	/*
	 * Unfortunately the format of the Link Status in the old
	 * 16-bit Port Information message isn't the same as the
	 * 16-bit Port Capabilities bitfield used everywhere else ...
	 */
	if (lstatus & F_FW_PORT_CMD_RXPAUSE)
		linkattr |= FW_PORT_CAP32_FC_RX;
	if (lstatus & F_FW_PORT_CMD_TXPAUSE)
		linkattr |= FW_PORT_CAP32_FC_TX;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
		linkattr |= FW_PORT_CAP32_SPEED_100M;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
		linkattr |= FW_PORT_CAP32_SPEED_1G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
		linkattr |= FW_PORT_CAP32_SPEED_10G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
		linkattr |= FW_PORT_CAP32_SPEED_25G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
		linkattr |= FW_PORT_CAP32_SPEED_40G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
		linkattr |= FW_PORT_CAP32_SPEED_100G;

	return linkattr;
}

/*
 * Updates all fields owned by the common code in port_info and link_config
 * based on information provided by the firmware.  Does not touch any
 * requested_* field.
 */
static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
    enum fw_port_action action, bool *mod_changed, bool *link_changed)
{
	struct link_config old_lc, *lc = &pi->link_cfg;
	unsigned char fc;
	u32 stat, linkattr;
	int old_ptype, old_mtype;

	/* Snapshot the old state so we can report what changed. */
	old_ptype = pi->port_type;
	old_mtype = pi->mod_type;
	old_lc = *lc;
	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* Old 16-bit Port Information message. */
		stat = be32_to_cpu(p->u.info.lstatus_to_modtype);

		pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
		pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
G_FW_PORT_CMD_MDIOADDR(stat) : -1;

		lc->pcaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
		lc->acaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
		lc->lpacaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);

		linkattr = lstatus_to_fwcap(stat);
	} else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
		/* New 32-bit Port Information message. */
		stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);

		pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
		pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
		    G_FW_PORT_CMD_MDIOADDR32(stat) : -1;

		lc->pcaps = be32_to_cpu(p->u.info32.pcaps32);
		lc->acaps = be32_to_cpu(p->u.info32.acaps32);
		lc->lpacaps = be32_to_cpu(p->u.info32.lpacaps32);
		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);

		linkattr = be32_to_cpu(p->u.info32.linkattr32);
	} else {
		CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
		return;
	}

	lc->speed = fwcap_to_speed(linkattr);
	lc->fec = fwcap_to_fec(linkattr, true);

	fc = 0;
	if (linkattr & FW_PORT_CAP32_FC_RX)
		fc |= PAUSE_RX;
	if (linkattr & FW_PORT_CAP32_FC_TX)
		fc |= PAUSE_TX;
	lc->fc = fc;

	if (mod_changed != NULL)
		*mod_changed = false;
	if (link_changed != NULL)
		*link_changed = false;
	if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
	    old_lc.pcaps != lc->pcaps) {
		/* Port/module change: refresh the FEC hint for the new hw. */
		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE)
			lc->fec_hint = fwcap_to_fec(lc->acaps, true);
		if (mod_changed != NULL)
			*mod_changed = true;
	}
	if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
	    old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
		if (link_changed != NULL)
			*link_changed = true;
	}
}

/**
 *	t4_update_port_info - retrieve and update port information if changed
 *	@pi: the port_info
 *
 *	We issue a Get Port Information Command to the Firmware and, if
 *	successful, we check to see if anything is different from what we
 *	last recorded and update things accordingly.
*/ int t4_update_port_info(struct port_info *pi) { struct adapter *sc = pi->adapter; struct fw_port_cmd cmd; enum fw_port_action action; int ret; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_PORT_CMD_PORTID(pi->tx_chan)); action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 : FW_PORT_ACTION_GET_PORT_INFO; cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) | FW_LEN16(cmd)); ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd); if (ret) return ret; handle_port_info(pi, &cmd, action, NULL, NULL); return 0; } /** * t4_handle_fw_rpl - process a FW reply message * @adap: the adapter * @rpl: start of the FW message * * Processes a FW message, such as link state change messages. */ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) { u8 opcode = *(const u8 *)rpl; const struct fw_port_cmd *p = (const void *)rpl; enum fw_port_action action = G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); bool mod_changed, link_changed; if (opcode == FW_PORT_CMD && (action == FW_PORT_ACTION_GET_PORT_INFO || action == FW_PORT_ACTION_GET_PORT_INFO32)) { /* link/module state change message */ int i; int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); struct port_info *pi = NULL; struct link_config *lc; for_each_port(adap, i) { pi = adap2pinfo(adap, i); if (pi->tx_chan == chan) break; } lc = &pi->link_cfg; PORT_LOCK(pi); handle_port_info(pi, p, action, &mod_changed, &link_changed); PORT_UNLOCK(pi); if (mod_changed) t4_os_portmod_changed(pi); if (link_changed) { PORT_LOCK(pi); t4_os_link_changed(pi); PORT_UNLOCK(pi); } } else { CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode); return -EINVAL; } return 0; } /** * get_pci_mode - determine a card's PCI mode * @adapter: the adapter * @p: where to store the PCI settings * * Determines a card's PCI mode and associated parameters, such as speed * and width. 
*/
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/* Flash Read-ID -> size mapping entry for non-standard parts. */
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;

	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
			    supported_flash[part].size_mb;
			adapter->params.sf_nsec =
			    adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /* 32MB */
		case 0x17: size = 1 << 26; break; /* 64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*  8MB */
		case 0x18: size = 1 << 24; break; /* 16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*  8MB */
		case 0x18: size = 1 << 24; break; /* 16MB */
		}
		break;
	}

	/*
	 * If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
			flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}

/*
 * Program the PCIe Completion Timeout range (low 4 bits of Device Control 2).
 */
static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
		val &= 0xfff0;		/* clear the current timeout range */
		val |= range;
		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}

/*
 * Return the per-chip parameter block for the given chip id, or NULL if the
 * chip id is out of range.
 */
const struct chip_params *t4_get_chip_params(int chipid)
{
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_obq = CIM_NUM_OBQ,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
			.rss_nentries = RSS_NENTRIES,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = RSS_NENTRIES,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = T6_RSS_NENTRIES,
		},
	};

	/* Table is indexed from CHELSIO_T4. */
	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}

/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
*
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}

/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	t4_intr_disable(adapter);
	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
	for_each_port(adapter, port) {
		u32 a_port_cfg = is_t4(adapter) ?
				 PORT_REG(port, A_XGMAC_PORT_CFG) :
				 T5_PORT_REG(port, A_MAC_PORT_CFG);

		/* Drop SIGNAL_DET to force the link down on each port. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
	}
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	return 0;
}

/**
 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.  If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" register may not be used.
*/
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.page_shift;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_s_qpp
		     : adapter->params.sge.iq_s_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
	    t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* The register stores the entry count in units of 128. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
	    be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}

/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ applies a timer scale factor to the SGE timer values. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 =
sp->fl_starve_threshold; else if (is_t5(adapter)) sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1; else sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1; /* egress queues: log2 of # of doorbells per BAR2 page */ r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); r >>= S_QUEUESPERPAGEPF0 + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf; sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0; /* ingress queues: log2 of # of doorbells per BAR2 page */ r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF); r >>= S_QUEUESPERPAGEPF0 + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf; sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0; r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE); r >>= S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf; sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10; r = t4_read_reg(adapter, A_SGE_CONTROL); sp->sge_control = r; sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64; sp->fl_pktshift = G_PKTSHIFT(r); if (chip_id(adapter) <= CHELSIO_T5) { sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + X_INGPADBOUNDARY_SHIFT); } else { sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + X_T6_INGPADBOUNDARY_SHIFT); } if (is_t4(adapter)) sp->pack_boundary = sp->pad_boundary; else { r = t4_read_reg(adapter, A_SGE_CONTROL2); if (G_INGPACKBOUNDARY(r) == 0) sp->pack_boundary = 16; else sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5); } for (i = 0; i < SGE_FLBUF_SIZES; i++) sp->sge_fl_buffer_size[i] = t4_read_reg(adapter, A_SGE_FL_BUFFER_SIZE0 + (4 * i)); return 0; } +/* Convert the LE's hardware hash mask to a shorter filter mask. 
*/ +static inline uint16_t +hashmask_to_filtermask(uint64_t hashmask, uint16_t filter_mode) +{ + static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1}; + int i; + uint16_t filter_mask; + uint64_t mask; /* field mask */ + + filter_mask = 0; + for (i = S_FCOE; i <= S_FRAGMENTATION; i++) { + if ((filter_mode & (1 << i)) == 0) + continue; + mask = (1 << width[i]) - 1; + if ((hashmask & mask) == mask) + filter_mask |= 1 << i; + hashmask >>= width[i]; + } + + return (filter_mask); +} + /* * Read and cache the adapter's compressed filter mode and ingress config. */ -static void read_filter_mode_and_ingress_config(struct adapter *adap, - bool sleep_ok) +static void +read_filter_mode_and_ingress_config(struct adapter *adap) { - uint32_t v; + int rc; + uint32_t v, param[2], val[2]; struct tp_params *tpp = &adap->params.tp; + uint64_t hash_mask; + + param[0] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | + V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK); + param[1] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | + V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE); + rc = -t4_query_params(adap, adap->mbox, adap->pf, 0, 2, param, val); + if (rc == 0) { + tpp->filter_mode = G_FW_PARAMS_PARAM_FILTER_MODE(val[0]); + tpp->filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val[0]); + tpp->vnic_mode = val[1]; + } else { + /* + * Old firmware. Read filter mode/mask and ingress config + * straight from the hardware. 
+ */ + t4_tp_pio_read(adap, &v, 1, A_TP_VLAN_PRI_MAP, true); + tpp->filter_mode = v & 0xffff; + + hash_mask = 0; + if (chip_id(adap) > CHELSIO_T4) { + v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3)); + hash_mask = v; + v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4)); + hash_mask |= (u64)v << 32; + } + tpp->filter_mask = hashmask_to_filtermask(hash_mask, + tpp->filter_mode); - t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP, - sleep_ok); - t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG, - sleep_ok); + t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true); + if (v & F_VNIC) + tpp->vnic_mode = FW_VNIC_MODE_PF_VF; + else + tpp->vnic_mode = FW_VNIC_MODE_OUTER_VLAN; + } /* * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field * shift positions of several elements of the Compressed Filter Tuple * for this adapter which we need frequently ... */ tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE); tpp->port_shift = t4_filter_field_shift(adap, F_PORT); tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN); tpp->tos_shift = t4_filter_field_shift(adap, F_TOS); tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL); tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE); tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH); tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE); tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION); - - if (chip_id(adap) > CHELSIO_T4) { - v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3)); - adap->params.tp.hash_filter_mask = v; - v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4)); - adap->params.tp.hash_filter_mask |= (u64)v << 32; - } } /** * t4_init_tp_params - initialize adap->params.tp * @adap: the adapter * * Initialize various fields of the adapter's TP Parameters structure. 
*/ int t4_init_tp_params(struct adapter *adap) { int chan; u32 tx_len, rx_len, r, v; struct tp_params *tpp = &adap->params.tp; v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION); tpp->tre = G_TIMERRESOLUTION(v); tpp->dack_re = G_DELAYEDACKRESOLUTION(v); /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ for (chan = 0; chan < MAX_NCHAN; chan++) tpp->tx_modq[chan] = chan; - read_filter_mode_and_ingress_config(adap, true); + read_filter_mode_and_ingress_config(adap); if (chip_id(adap) > CHELSIO_T5) { v = t4_read_reg(adap, A_TP_OUT_CONFIG); tpp->rx_pkt_encap = v & F_CRXPKTENC; } else tpp->rx_pkt_encap = false; rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE); tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE); r = t4_read_reg(adap, A_TP_PARA_REG2); rx_len = min(rx_len, G_MAXRXDATA(r)); tx_len = min(tx_len, G_MAXRXDATA(r)); r = t4_read_reg(adap, A_TP_PARA_REG7); v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r)); rx_len = min(rx_len, v); tx_len = min(tx_len, v); tpp->max_tx_pdu = tx_len; tpp->max_rx_pdu = rx_len; return 0; } /** * t4_filter_field_shift - calculate filter field shift * @adap: the adapter * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) * * Return the shift position of a filter field within the Compressed * Filter Tuple. The filter field is specified via its selection bit * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN. 
*/ int t4_filter_field_shift(const struct adapter *adap, int filter_sel) { - unsigned int filter_mode = adap->params.tp.vlan_pri_map; + const unsigned int filter_mode = adap->params.tp.filter_mode; unsigned int sel; int field_shift; if ((filter_mode & filter_sel) == 0) return -1; for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { switch (filter_mode & sel) { case F_FCOE: field_shift += W_FT_FCOE; break; case F_PORT: field_shift += W_FT_PORT; break; case F_VNIC_ID: field_shift += W_FT_VNIC_ID; break; case F_VLAN: field_shift += W_FT_VLAN; break; case F_TOS: field_shift += W_FT_TOS; break; case F_PROTOCOL: field_shift += W_FT_PROTOCOL; break; case F_ETHERTYPE: field_shift += W_FT_ETHERTYPE; break; case F_MACMATCH: field_shift += W_FT_MACMATCH; break; case F_MPSHITTYPE: field_shift += W_FT_MPSHITTYPE; break; case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break; } } return field_shift; } int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id) { u8 addr[6]; int ret, i, j; struct port_info *p = adap2pinfo(adap, port_id); u32 param, val; struct vi_info *vi = &p->vi[0]; for (i = 0, j = -1; i <= p->port_id; i++) { do { j++; } while ((adap->params.portvec & (1 << j)) == 0); } p->tx_chan = j; p->mps_bg_map = t4_get_mps_bg_map(adap, j); p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j); p->rx_c_chan = t4_get_rx_c_chan(adap, j); p->lport = j; if (!(adap->flags & IS_VF) || adap->params.vfres.r_caps & FW_CMD_CAP_PORT) { t4_update_port_info(p); } ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size, &vi->vfvld, &vi->vin); if (ret < 0) return ret; vi->viid = ret; t4_os_set_hw_addr(p, addr); param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | V_FW_PARAMS_PARAM_YZ(vi->viid); ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val); if (ret) vi->rss_base = 0xffff; else { /* MPASS((val >> 16) == rss_size); */ vi->rss_base = val & 0xffff; } return 0; } /** * t4_read_cimq_cfg - read CIM queue 
configuration * @adap: the adapter * @base: holds the queue base addresses in bytes * @size: holds the queue sizes in bytes * @thres: holds the queue full thresholds in bytes * * Returns the current configuration of the CIM queues, starting with * the IBQs, then the OBQs. */ void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres) { unsigned int i, v; int cim_num_obq = adap->chip_params->cim_num_obq; for (i = 0; i < CIM_NUM_IBQ; i++) { t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT | V_QUENUMSELECT(i)); v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); /* value is in 256-byte units */ *base++ = G_CIMQBASE(v) * 256; *size++ = G_CIMQSIZE(v) * 256; *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */ } for (i = 0; i < cim_num_obq; i++) { t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT | V_QUENUMSELECT(i)); v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); /* value is in 256-byte units */ *base++ = G_CIMQBASE(v) * 256; *size++ = G_CIMQSIZE(v) * 256; } } /** * t4_read_cim_ibq - read the contents of a CIM inbound queue * @adap: the adapter * @qid: the queue index * @data: where to store the queue contents * @n: capacity of @data in 32-bit words * * Reads the contents of the selected CIM queue starting at address 0 up * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on * error and the number of 32-bit words actually read on success. */ int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) { int i, err, attempts; unsigned int addr; const unsigned int nwords = CIM_IBQ_SIZE * 4; if (qid > 5 || (n & 3)) return -EINVAL; addr = qid * nwords; if (n > nwords) n = nwords; /* It might take 3-10ms before the IBQ debug read access is allowed. * Wait for 1 Sec with a delay of 1 usec. 
*/ attempts = 1000000; for (i = 0; i < n; i++, addr++) { t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) | F_IBQDBGEN); err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0, attempts, 1); if (err) return err; *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA); } t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0); return i; } /** * t4_read_cim_obq - read the contents of a CIM outbound queue * @adap: the adapter * @qid: the queue index * @data: where to store the queue contents * @n: capacity of @data in 32-bit words * * Reads the contents of the selected CIM queue starting at address 0 up * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on * error and the number of 32-bit words actually read on success. */ int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) { int i, err; unsigned int addr, v, nwords; int cim_num_obq = adap->chip_params->cim_num_obq; if ((qid > (cim_num_obq - 1)) || (n & 3)) return -EINVAL; t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT | V_QUENUMSELECT(qid)); v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */ nwords = G_CIMQSIZE(v) * 64; /* same */ if (n > nwords) n = nwords; for (i = 0; i < n; i++, addr++) { t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) | F_OBQDBGEN); err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1); if (err) return err; *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA); } t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0); return i; } enum { CIM_QCTL_BASE = 0, CIM_CTL_BASE = 0x2000, CIM_PBT_ADDR_BASE = 0x2800, CIM_PBT_LRF_BASE = 0x3000, CIM_PBT_DATA_BASE = 0x3800 }; /** * t4_cim_read - read a block from CIM internal address space * @adap: the adapter * @addr: the start address within the CIM address space * @n: number of words to read * @valp: where to store the result * * Reads a block of 4-byte words from the CIM intenal address space. 
*/ int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp) { int ret = 0; if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) return -EBUSY; for ( ; !ret && n--; addr += 4) { t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr); ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 0, 5, 2); if (!ret) *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA); } return ret; } /** * t4_cim_write - write a block into CIM internal address space * @adap: the adapter * @addr: the start address within the CIM address space * @n: number of words to write * @valp: set of values to write * * Writes a block of 4-byte words into the CIM intenal address space. */ int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, const unsigned int *valp) { int ret = 0; if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) return -EBUSY; for ( ; !ret && n--; addr += 4) { t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++); t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE); ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 0, 5, 2); } return ret; } static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val) { return t4_cim_write(adap, addr, 1, &val); } /** * t4_cim_ctl_read - read a block from CIM control region * @adap: the adapter * @addr: the start address within the CIM control region * @n: number of words to read * @valp: where to store the result * * Reads a block of 4-byte words from the CIM control region. */ int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp) { return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp); } /** * t4_cim_read_la - read CIM LA capture buffer * @adap: the adapter * @la_buf: where to store the LA data * @wrptr: the HW write pointer within the capture buffer * * Reads the contents of the CIM LA buffer with the most recent entry at * the end of the returned data and with the entry at @wrptr first. 
* We try to leave the LA in the running state we find it in. */ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) { int i, ret; unsigned int cfg, val, idx; ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg); if (ret) return ret; if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */ ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0); if (ret) return ret; } ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val); if (ret) goto restart; idx = G_UPDBGLAWRPTR(val); if (wrptr) *wrptr = idx; for (i = 0; i < adap->params.cim_la_size; i++) { ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN); if (ret) break; ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val); if (ret) break; if (val & F_UPDBGLARDEN) { ret = -ETIMEDOUT; break; } ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]); if (ret) break; /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to * identify the 32-bit portion of the full 312-bit data */ if (is_t6(adap) && (idx & 0xf) >= 9) idx = (idx & 0xff0) + 0x10; else idx++; /* address can't exceed 0xfff */ idx &= M_UPDBGLARDPTR; } restart: if (cfg & F_UPDBGLAEN) { int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, cfg & ~F_UPDBGLARDEN); if (!ret) ret = r; } return ret; } /** * t4_tp_read_la - read TP LA capture buffer * @adap: the adapter * @la_buf: where to store the LA data * @wrptr: the HW write pointer within the capture buffer * * Reads the contents of the TP LA buffer with the most recent entry at * the end of the returned data and with the entry at @wrptr first. * We leave the LA in the running state we find it in. 
*/ void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr) { bool last_incomplete; unsigned int i, cfg, val, idx; cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff; if (cfg & F_DBGLAENABLE) /* freeze LA */ t4_write_reg(adap, A_TP_DBG_LA_CONFIG, adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE)); val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG); idx = G_DBGLAWPTR(val); last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0; if (last_incomplete) idx = (idx + 1) & M_DBGLARPTR; if (wrptr) *wrptr = idx; val &= 0xffff; val &= ~V_DBGLARPTR(M_DBGLARPTR); val |= adap->params.tp.la_mask; for (i = 0; i < TPLA_SIZE; i++) { t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val); la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL); idx = (idx + 1) & M_DBGLARPTR; } /* Wipe out last entry if it isn't valid */ if (last_incomplete) la_buf[TPLA_SIZE - 1] = ~0ULL; if (cfg & F_DBGLAENABLE) /* restore running state */ t4_write_reg(adap, A_TP_DBG_LA_CONFIG, cfg | adap->params.tp.la_mask); } /* * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in * seconds). If we find one of the SGE Ingress DMA State Machines in the same * state for more than the Warning Threshold then we'll issue a warning about * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel * appears to be hung every Warning Repeat second till the situation clears. * If the situation clears, we'll note that as well. */ #define SGE_IDMA_WARN_THRESH 1 #define SGE_IDMA_WARN_REPEAT 300 /** * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor * @adapter: the adapter * @idma: the adapter IDMA Monitor state * * Initialize the state of an SGE Ingress DMA Monitor. */ void t4_idma_monitor_init(struct adapter *adapter, struct sge_idma_monitor_state *idma) { /* Initialize the state variables for detecting an SGE Ingress DMA * hang. 
The SGE has internal counters which count up on each clock * tick whenever the SGE finds its Ingress DMA State Engines in the * same state they were on the previous clock tick. The clock used is * the Core Clock so we have a limit on the maximum "time" they can * record; typically a very small number of seconds. For instance, * with a 600MHz Core Clock, we can only count up to a bit more than * 7s. So we'll synthesize a larger counter in order to not run the * risk of having the "timers" overflow and give us the flexibility to * maintain a Hung SGE State Machine of our own which operates across * a longer time frame. */ idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */ idma->idma_stalled[0] = idma->idma_stalled[1] = 0; } /** * t4_idma_monitor - monitor SGE Ingress DMA state * @adapter: the adapter * @idma: the adapter IDMA Monitor state * @hz: number of ticks/second * @ticks: number of ticks since the last IDMA Monitor call */ void t4_idma_monitor(struct adapter *adapter, struct sge_idma_monitor_state *idma, int hz, int ticks) { int i, idma_same_state_cnt[2]; /* Read the SGE Debug Ingress DMA Same State Count registers. These * are counters inside the SGE which count up on each clock when the * SGE finds its Ingress DMA State Engines in the same states they * were in the previous clock. The counters will peg out at * 0xffffffff without wrapping around so once they pass the 1s * threshold they'll stay above that till the IDMA state changes. */ t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13); idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH); idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW); for (i = 0; i < 2; i++) { u32 debug0, debug11; /* If the Ingress DMA Same State Counter ("timer") is less * than 1s, then we can reset our synthesized Stall Timer and * continue. 
If we have previously emitted warnings about a * potential stalled Ingress Queue, issue a note indicating * that the Ingress Queue has resumed forward progress. */ if (idma_same_state_cnt[i] < idma->idma_1s_thresh) { if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz) CH_WARN(adapter, "SGE idma%d, queue %u, " "resumed after %d seconds\n", i, idma->idma_qid[i], idma->idma_stalled[i]/hz); idma->idma_stalled[i] = 0; continue; } /* Synthesize an SGE Ingress DMA Same State Timer in the Hz * domain. The first time we get here it'll be because we * passed the 1s Threshold; each additional time it'll be * because the RX Timer Callback is being fired on its regular * schedule. * * If the stall is below our Potential Hung Ingress Queue * Warning Threshold, continue. */ if (idma->idma_stalled[i] == 0) { idma->idma_stalled[i] = hz; idma->idma_warn[i] = 0; } else { idma->idma_stalled[i] += ticks; idma->idma_warn[i] -= ticks; } if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz) continue; /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds. */ if (idma->idma_warn[i] > 0) continue; idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz; /* Read and save the SGE IDMA State and Queue ID information. * We do this every time in case it changes across time ... * can't be too careful ... 
*/ t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0); debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW); idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f; t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11); debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW); idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff; CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in " " state %u for %d seconds (debug0=%#x, debug11=%#x)\n", i, idma->idma_qid[i], idma->idma_state[i], idma->idma_stalled[i]/hz, debug0, debug11); t4_sge_decode_idma_state(adapter, idma->idma_state[i]); } } /** * t4_set_vf_mac - Set MAC address for the specified VF * @adapter: The adapter * @pf: the PF used to instantiate the VFs * @vf: one of the VFs instantiated by the specified PF * @naddr: the number of MAC addresses * @addr: the MAC address(es) to be set to the specified VF */ int t4_set_vf_mac(struct adapter *adapter, unsigned int pf, unsigned int vf, unsigned int naddr, u8 *addr) { struct fw_acl_mac_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_ACL_MAC_CMD_PFN(pf) | V_FW_ACL_MAC_CMD_VFN(vf)); /* Note: Do not enable the ACL */ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); cmd.nmac = naddr; switch (pf) { case 3: memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3)); break; case 2: memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2)); break; case 1: memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1)); break; case 0: memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0)); break; } return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd); } /** * t4_read_pace_tbl - read the pace table * @adap: the adapter * @pace_vals: holds the returned values * * Returns the values of TP's pace table in microseconds. 
*/ void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) { unsigned int i, v; for (i = 0; i < NTX_SCHED; i++) { t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); v = t4_read_reg(adap, A_TP_PACE_TABLE); pace_vals[i] = dack_ticks_to_usec(adap, v); } } /** * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler * @adap: the adapter * @sched: the scheduler index * @kbps: the byte rate in Kbps * @ipg: the interpacket delay in tenths of nanoseconds * * Return the current configuration of a HW Tx scheduler. */ void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, unsigned int *ipg, bool sleep_ok) { unsigned int v, addr, bpt, cpt; if (kbps) { addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); if (sched & 1) v >>= 16; bpt = (v >> 8) & 0xff; cpt = v & 0xff; if (!cpt) *kbps = 0; /* scheduler disabled */ else { v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ *kbps = (v * bpt) / 125; } } if (ipg) { addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); if (sched & 1) v >>= 16; v &= 0xffff; *ipg = (10000 * v) / core_ticks_per_usec(adap); } } /** * t4_load_cfg - download config file * @adap: the adapter * @cfg_data: the cfg text file to write * @size: text file size * * Write the supplied config text file to the card's serial flash. 
*/ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) { int ret, i, n, cfg_addr; unsigned int addr; unsigned int flash_cfg_start_sec; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; cfg_addr = t4_flash_cfg_addr(adap); if (cfg_addr < 0) return cfg_addr; addr = cfg_addr; flash_cfg_start_sec = addr / SF_SEC_SIZE; if (size > FLASH_CFG_MAX_SIZE) { CH_ERR(adap, "cfg file too large, max is %u bytes\n", FLASH_CFG_MAX_SIZE); return -EFBIG; } i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ sf_sec_size); ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, flash_cfg_start_sec + i - 1); /* * If size == 0 then we're simply erasing the FLASH sectors associated * with the on-adapter Firmware Configuration File. */ if (ret || size == 0) goto out; /* this will write to the flash up to SF_PAGE_SIZE at a time */ for (i = 0; i< size; i+= SF_PAGE_SIZE) { if ( (size - i) < SF_PAGE_SIZE) n = size - i; else n = SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, n, cfg_data, 1); if (ret) goto out; addr += SF_PAGE_SIZE; cfg_data += SF_PAGE_SIZE; } out: if (ret) CH_ERR(adap, "config file %s failed %d\n", (size == 0 ? "clear" : "download"), ret); return ret; } /** * t5_fw_init_extern_mem - initialize the external memory * @adap: the adapter * * Initializes the external memory on T5. */ int t5_fw_init_extern_mem(struct adapter *adap) { u32 params[1], val[1]; int ret; if (!is_t5(adap)) return 0; val[0] = 0xff; /* Initialize all MCs */ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT)); ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val, FW_CMD_MAX_TIMEOUT); return ret; } /* BIOS boot headers */ typedef struct pci_expansion_rom_header { u8 signature[2]; /* ROM Signature. 
Should be 0xaa55 */ u8 reserved[22]; /* Reserved per processor Architecture data */ u8 pcir_offset[2]; /* Offset to PCI Data Structure */ } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */ /* Legacy PCI Expansion ROM Header */ typedef struct legacy_pci_expansion_rom_header { u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ u8 size512; /* Current Image Size in units of 512 bytes */ u8 initentry_point[4]; u8 cksum; /* Checksum computed on the entire Image */ u8 reserved[16]; /* Reserved */ u8 pcir_offset[2]; /* Offset to PCI Data Struture */ } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */ /* EFI PCI Expansion ROM Header */ typedef struct efi_pci_expansion_rom_header { u8 signature[2]; // ROM signature. The value 0xaa55 u8 initialization_size[2]; /* Units 512. Includes this header */ u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */ u8 efi_subsystem[2]; /* Subsystem value for EFI image header */ u8 efi_machine_type[2]; /* Machine type from EFI image header */ u8 compression_type[2]; /* Compression type. */ /* * Compression type definition * 0x0: uncompressed * 0x1: Compressed * 0x2-0xFFFF: Reserved */ u8 reserved[8]; /* Reserved */ u8 efi_image_header_offset[2]; /* Offset to EFI Image */ u8 pcir_offset[2]; /* Offset to PCI Data Structure */ } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */ /* PCI Data Structure Format */ typedef struct pcir_data_structure { /* PCI Data Structure */ u8 signature[4]; /* Signature. The string "PCIR" */ u8 vendor_id[2]; /* Vendor Identification */ u8 device_id[2]; /* Device Identification */ u8 vital_product[2]; /* Pointer to Vital Product Data */ u8 length[2]; /* PCIR Data Structure Length */ u8 revision; /* PCIR Data Structure Revision */ u8 class_code[3]; /* Class Code */ u8 image_length[2]; /* Image Length. Multiple of 512B */ u8 code_revision[2]; /* Revision Level of Code/Data */ u8 code_type; /* Code Type. 
*/ /* * PCI Expansion ROM Code Types * 0x00: Intel IA-32, PC-AT compatible. Legacy * 0x01: Open Firmware standard for PCI. FCODE * 0x02: Hewlett-Packard PA RISC. HP reserved * 0x03: EFI Image. EFI * 0x04-0xFF: Reserved. */ u8 indicator; /* Indicator. Identifies the last image in the ROM */ u8 reserved[2]; /* Reserved */ } pcir_data_t; /* PCI__DATA_STRUCTURE */ /* BOOT constants */ enum { BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */ BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */ BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */ BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */ BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */ VENDOR_ID = 0x1425, /* Vendor ID */ PCIR_SIGNATURE = 0x52494350 /* PCIR signature */ }; /* * modify_device_id - Modifies the device ID of the Boot BIOS image * @adatper: the device ID to write. * @boot_data: the boot image to modify. * * Write the supplied device ID to the boot BIOS image. */ static void modify_device_id(int device_id, u8 *boot_data) { legacy_pci_exp_rom_header_t *header; pcir_data_t *pcir_header; u32 cur_header = 0; /* * Loop through all chained images and change the device ID's */ while (1) { header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header]; pcir_header = (pcir_data_t *) &boot_data[cur_header + le16_to_cpu(*(u16*)header->pcir_offset)]; /* * Only modify the Device ID if code type is Legacy or HP. * 0x00: Okay to modify * 0x01: FCODE. Do not be modify * 0x03: Okay to modify * 0x04-0xFF: Do not modify */ if (pcir_header->code_type == 0x00) { u8 csum = 0; int i; /* * Modify Device ID to match current adatper */ *(u16*) pcir_header->device_id = device_id; /* * Set checksum temporarily to 0. * We will recalculate it later. 
*/ header->cksum = 0x0; /* * Calculate and update checksum */ for (i = 0; i < (header->size512 * 512); i++) csum += (u8)boot_data[cur_header + i]; /* * Invert summed value to create the checksum * Writing new checksum value directly to the boot data */ boot_data[cur_header + 7] = -csum; } else if (pcir_header->code_type == 0x03) { /* * Modify Device ID to match current adatper */ *(u16*) pcir_header->device_id = device_id; } /* * Check indicator element to identify if this is the last * image in the ROM. */ if (pcir_header->indicator & 0x80) break; /* * Move header pointer up to the next image in the ROM. */ cur_header += header->size512 * 512; } } /* * t4_load_boot - download boot flash * @adapter: the adapter * @boot_data: the boot image to write * @boot_addr: offset in flash to write boot_data * @size: image size * * Write the supplied boot image to the card's serial flash. * The boot image has the following sections: a 28-byte header and the * boot image. */ int t4_load_boot(struct adapter *adap, u8 *boot_data, unsigned int boot_addr, unsigned int size) { pci_exp_rom_header_t *header; int pcir_offset ; pcir_data_t *pcir_header; int ret, addr; uint16_t device_id; unsigned int i; unsigned int boot_sector = (boot_addr * 1024 ); unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; /* * Make sure the boot image does not encroach on the firmware region */ if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) { CH_ERR(adap, "boot image encroaching on firmware region\n"); return -EFBIG; } /* * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot, * and Boot configuration data sections. These 3 boot sections span * sectors 0 to 7 in flash and live right before the FW image location. */ i = DIV_ROUND_UP(size ? 
size : FLASH_FW_START, sf_sec_size); ret = t4_flash_erase_sectors(adap, boot_sector >> 16, (boot_sector >> 16) + i - 1); /* * If size == 0 then we're simply erasing the FLASH sectors associated * with the on-adapter option ROM file */ if (ret || (size == 0)) goto out; /* Get boot header */ header = (pci_exp_rom_header_t *)boot_data; pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset); /* PCIR Data Structure */ pcir_header = (pcir_data_t *) &boot_data[pcir_offset]; /* * Perform some primitive sanity testing to avoid accidentally * writing garbage over the boot sectors. We ought to check for * more but it's not worth it for now ... */ if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { CH_ERR(adap, "boot image too small/large\n"); return -EFBIG; } #ifndef CHELSIO_T4_DIAGS /* * Check BOOT ROM header signature */ if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) { CH_ERR(adap, "Boot image missing signature\n"); return -EINVAL; } /* * Check PCI header signature */ if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) { CH_ERR(adap, "PCI header missing signature\n"); return -EINVAL; } /* * Check Vendor ID matches Chelsio ID */ if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) { CH_ERR(adap, "Vendor ID missing signature\n"); return -EINVAL; } #endif /* * Retrieve adapter's device ID */ t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id); /* Want to deal with PF 0 so I strip off PF 4 indicator */ device_id = device_id & 0xf0ff; /* * Check PCIE Device ID */ if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) { /* * Change the device ID in the Boot BIOS image to match * the Device ID of the current adapter. */ modify_device_id(device_id, boot_data); } /* * Skip over the first SF_PAGE_SIZE worth of data and write it after * we finish copying the rest of the boot image. This will ensure * that the BIOS boot header will only be written if the boot image * was written in full. 
*/ addr = boot_sector; for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; boot_data += SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0); if (ret) goto out; } ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, (const u8 *)header, 0); out: if (ret) CH_ERR(adap, "boot image download failed, error %d\n", ret); return ret; } /* * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration * @adapter: the adapter * * Return the address within the flash where the OptionROM Configuration * is stored, or an error if the device FLASH is too small to contain * a OptionROM Configuration. */ static int t4_flash_bootcfg_addr(struct adapter *adapter) { /* * If the device FLASH isn't large enough to hold a Firmware * Configuration File, return an error. */ if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE) return -ENOSPC; return FLASH_BOOTCFG_START; } int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size) { int ret, i, n, cfg_addr; unsigned int addr; unsigned int flash_cfg_start_sec; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; cfg_addr = t4_flash_bootcfg_addr(adap); if (cfg_addr < 0) return cfg_addr; addr = cfg_addr; flash_cfg_start_sec = addr / SF_SEC_SIZE; if (size > FLASH_BOOTCFG_MAX_SIZE) { CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", FLASH_BOOTCFG_MAX_SIZE); return -EFBIG; } i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */ sf_sec_size); ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, flash_cfg_start_sec + i - 1); /* * If size == 0 then we're simply erasing the FLASH sectors associated * with the on-adapter OptionROM Configuration File. 
*/ if (ret || size == 0) goto out; /* this will write to the flash up to SF_PAGE_SIZE at a time */ for (i = 0; i< size; i+= SF_PAGE_SIZE) { if ( (size - i) < SF_PAGE_SIZE) n = size - i; else n = SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, n, cfg_data, 0); if (ret) goto out; addr += SF_PAGE_SIZE; cfg_data += SF_PAGE_SIZE; } out: if (ret) CH_ERR(adap, "boot config data %s failed %d\n", (size == 0 ? "clear" : "download"), ret); return ret; } /** - * t4_set_filter_mode - configure the optional components of filter tuples + * t4_set_filter_cfg - set up filter mode/mask and ingress config. * @adap: the adapter - * @mode_map: a bitmap selcting which optional filter components to enable - * @sleep_ok: if true we may sleep while awaiting command completion - * - * Sets the filter mode by selecting the optional components to enable - * in filter tuples. Returns 0 on success and a negative error if the - * requested mode needs more bits than are available for optional - * components. - */ -int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map, - bool sleep_ok) -{ - static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 }; + * @mode: a bitmap selecting which optional filter components to enable + * @mask: a bitmap selecting which components to enable in filter mask + * @vnic_mode: the ingress config/vnic mode setting + * + * Sets the filter mode and mask by selecting the optional components to + * enable in filter tuples. Returns 0 on success and a negative error if + * the requested mode needs more bits than are available for optional + * components. The filter mask must be a subset of the filter mode. 
+ */ +int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode) +{ + static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1}; + int i, nbits, rc; + uint32_t param, val; + uint16_t fmode, fmask; + const int maxbits = FILTER_OPT_LEN; + + if (mode != -1 || mask != -1) { + if (mode != -1) { + fmode = mode; + nbits = 0; + for (i = S_FCOE; i <= S_FRAGMENTATION; i++) { + if (fmode & (1 << i)) + nbits += width[i]; + } + if (nbits > maxbits) { + CH_ERR(adap, "optional fields in the filter " + "mode (0x%x) add up to %d bits " + "(must be <= %db). Remove some fields and " + "try again.\n", fmode, nbits, maxbits); + return -E2BIG; + } - int i, nbits = 0; + /* + * Hardware wants the bits to be maxed out. Keep + * setting them until there's no room for more. + */ + for (i = S_FCOE; i <= S_FRAGMENTATION; i++) { + if (fmode & (1 << i)) + continue; + if (nbits + width[i] <= maxbits) { + fmode |= 1 << i; + nbits += width[i]; + if (nbits == maxbits) + break; + } + } - for (i = S_FCOE; i <= S_FRAGMENTATION; i++) - if (mode_map & (1 << i)) - nbits += width[i]; - if (nbits > FILTER_OPT_LEN) - return -EINVAL; - t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok); - read_filter_mode_and_ingress_config(adap, sleep_ok); + fmask = fmode & adap->params.tp.filter_mask; + if (fmask != adap->params.tp.filter_mask) { + CH_WARN(adap, + "filter mask will be changed from 0x%x to " + "0x%x to comply with the filter mode (0x%x).\n", + adap->params.tp.filter_mask, fmask, fmode); + } + } else { + fmode = adap->params.tp.filter_mode; + fmask = mask; + if ((fmode | fmask) != fmode) { + CH_ERR(adap, + "filter mask (0x%x) must be a subset of " + "the filter mode (0x%x).\n", fmask, fmode); + return -EINVAL; + } + } + + param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | + V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK); + val = V_FW_PARAMS_PARAM_FILTER_MODE(fmode) | + V_FW_PARAMS_PARAM_FILTER_MASK(fmask); + rc = 
t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, + &val); + if (rc < 0) + return rc; + } + + if (vnic_mode != -1) { + param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | + V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE); + val = vnic_mode; + rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, + &val); + if (rc < 0) + return rc; + } + + /* Refresh. */ + read_filter_mode_and_ingress_config(adap); return 0; } /** * t4_clr_port_stats - clear port statistics * @adap: the adapter * @idx: the port index * * Clear HW statistics for the given port. */ void t4_clr_port_stats(struct adapter *adap, int idx) { unsigned int i; u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map; u32 port_base_addr; if (is_t4(adap)) port_base_addr = PORT_BASE(idx); else port_base_addr = T5_PORT_BASE(idx); for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) t4_write_reg(adap, port_base_addr + i, 0); for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) t4_write_reg(adap, port_base_addr + i, 0); for (i = 0; i < 4; i++) if (bgmap & (1 << i)) { t4_write_reg(adap, A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); t4_write_reg(adap, A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); } } /** * t4_i2c_io - read/write I2C data from adapter * @adap: the adapter * @port: Port number if per-port device; <0 if not * @devid: per-port device ID or absolute device ID * @offset: byte offset into device I2C space * @len: byte length of I2C space data * @buf: buffer in which to return I2C data for read * buffer which holds the I2C data for write * @write: if true, do a write; else do a read * Reads/Writes the I2C data from/to the indicated device and location. 
int t4_i2c_io(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf, bool write)
{
	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
	/* Per-mailbox-command payload limit; transfers are chunked to this. */
	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
	int ret = 0;

	if (len > I2C_PAGE_SIZE)
		return -EINVAL;

	/*
	 * Don't allow transfers that span multiple pages.
	 * NOTE(review): only the first page boundary is checked here
	 * (offset < I2C_PAGE_SIZE); confirm callers never pass offsets in
	 * later pages with a length that crosses into the next page.
	 */
	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Build the common part of the LDST command once; per-chunk fields
	 * (boffset/blen/data) are refreshed inside the loop. */
	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
	/* 0xff means "no specific port" to the firmware. */
	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst_cmd.u.i2c.did = devid;

	/* Move the data in chunks of at most i2c_max bytes. */
	while (len > 0) {
		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;

		ldst_cmd.u.i2c.boffset = offset;
		ldst_cmd.u.i2c.blen = i2c_len;

		if (write)
			memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);

		/* Reads need the reply buffer; writes do not. */
		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
				 write ? NULL : &ldst_rpl);
		if (ret)
			break;

		if (!write)
			memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);

		offset += i2c_len;
		buf += i2c_len;
		len -= i2c_len;
	}

	return ret;
}
*/ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, enum ctxt_type ctype, u32 *data) { int ret; struct fw_ldst_cmd c; if (ctype == CTXT_EGRESS) ret = FW_LDST_ADDRSPC_SGE_EGRC; else if (ctype == CTXT_INGRESS) ret = FW_LDST_ADDRSPC_SGE_INGC; else if (ctype == CTXT_FLM) ret = FW_LDST_ADDRSPC_SGE_FLMC; else ret = FW_LDST_ADDRSPC_SGE_CONMC; memset(&c, 0, sizeof(c)); c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret)); c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c.u.idctxt.physid = cpu_to_be32(cid); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); } return ret; } /** * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW * @adap: the adapter * @cid: the context id * @ctype: the context type * @data: where to store the context data * * Reads an SGE context directly, bypassing FW. This is only for * debugging when FW is unavailable. 
*/ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, u32 *data) { int i, ret; t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype)); ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1); if (!ret) for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4) *data++ = t4_read_reg(adap, i); return ret; } int t4_sched_config(struct adapter *adapter, int type, int minmaxen, int sleep_ok) { struct fw_sched_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); cmd.u.config.sc = FW_SCHED_SC_CONFIG; cmd.u.config.type = type; cmd.u.config.minmaxen = minmaxen; return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), NULL, sleep_ok); } int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int cl, int minrate, int maxrate, int weight, int pktsize, int burstsize, int sleep_ok) { struct fw_sched_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); cmd.u.params.sc = FW_SCHED_SC_PARAMS; cmd.u.params.type = type; cmd.u.params.level = level; cmd.u.params.mode = mode; cmd.u.params.ch = channel; cmd.u.params.cl = cl; cmd.u.params.unit = rateunit; cmd.u.params.rate = ratemode; cmd.u.params.min = cpu_to_be32(minrate); cmd.u.params.max = cpu_to_be32(maxrate); cmd.u.params.weight = cpu_to_be16(weight); cmd.u.params.pktsize = cpu_to_be16(pktsize); cmd.u.params.burstsize = cpu_to_be16(burstsize); return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), NULL, sleep_ok); } int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode, unsigned int maxrate, int sleep_ok) { struct fw_sched_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 
F_FW_CMD_REQUEST | F_FW_CMD_WRITE); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); cmd.u.params.sc = FW_SCHED_SC_PARAMS; cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED; cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL; cmd.u.params.ch = channel; cmd.u.params.rate = ratemode; /* REL or ABS */ cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */ return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), NULL, sleep_ok); } int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl, int weight, int sleep_ok) { struct fw_sched_cmd cmd; if (weight < 0 || weight > 100) return -EINVAL; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); cmd.u.params.sc = FW_SCHED_SC_PARAMS; cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED; cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR; cmd.u.params.ch = channel; cmd.u.params.cl = cl; cmd.u.params.weight = cpu_to_be16(weight); return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), NULL, sleep_ok); } int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl, int mode, unsigned int maxrate, int pktsize, int sleep_ok) { struct fw_sched_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); cmd.u.params.sc = FW_SCHED_SC_PARAMS; cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED; cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL; cmd.u.params.mode = mode; cmd.u.params.ch = channel; cmd.u.params.cl = cl; cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE; cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS; cmd.u.params.max = cpu_to_be32(maxrate); cmd.u.params.pktsize = cpu_to_be16(pktsize); return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), NULL, sleep_ok); } /* * t4_config_watchdog - configure (enable/disable) a watchdog timer * @adapter: the adapter * @mbox: 
mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @timeout: watchdog timeout in ms * @action: watchdog timer / action * * There are separate watchdog timers for each possible watchdog * action. Configure one of the watchdog timers by setting a non-zero * timeout. Disable a watchdog timer by using a timeout of zero. */ int t4_config_watchdog(struct adapter *adapter, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int timeout, unsigned int action) { struct fw_watchdog_cmd wdog; unsigned int ticks; /* * The watchdog command expects a timeout in units of 10ms so we need * to convert it here (via rounding) and force a minimum of one 10ms * "tick" if the timeout is non-zero but the conversion results in 0 * ticks. */ ticks = (timeout + 5)/10; if (timeout && !ticks) ticks = 1; memset(&wdog, 0, sizeof wdog); wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) | V_FW_PARAMS_CMD_VFN(vf)); wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog)); wdog.timeout = cpu_to_be32(ticks); wdog.action = cpu_to_be32(action); return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL); } int t4_get_devlog_level(struct adapter *adapter, unsigned int *level) { struct fw_devlog_cmd devlog_cmd; int ret; memset(&devlog_cmd, 0, sizeof(devlog_cmd)); devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ); devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, sizeof(devlog_cmd), &devlog_cmd); if (ret) return ret; *level = devlog_cmd.level; return 0; } int t4_set_devlog_level(struct adapter *adapter, unsigned int level) { struct fw_devlog_cmd devlog_cmd; memset(&devlog_cmd, 0, sizeof(devlog_cmd)); devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); devlog_cmd.level = level; devlog_cmd.retval_len16 = 
cpu_to_be32(FW_LEN16(devlog_cmd)); return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, sizeof(devlog_cmd), &devlog_cmd); } int t4_configure_add_smac(struct adapter *adap) { unsigned int param, val; int ret = 0; adap->params.smac_add_support = 0; param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC)); /* Query FW to check if FW supports adding source mac address * to TCAM feature or not. * If FW returns 1, driver can use this feature and driver need to send * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to * enable adding smac to TCAM. */ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); if (ret) return ret; if (val == 1) { ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); if (!ret) /* Firmware allows adding explicit TCAM entries. * Save this internally. */ adap->params.smac_add_support = 1; } return ret; } int t4_configure_ringbb(struct adapter *adap) { unsigned int param, val; int ret = 0; param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE)); /* Query FW to check if FW supports ring switch feature or not. * If FW returns 1, driver can use this feature and driver need to send * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to * enable the ring backbone configuration. 
/*
 * t4_set_vlan_acl - Set a VLAN id for the specified VF
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *	NOTE(review): this parameter is currently unused; the call below goes
 *	through adap->mbox — confirm that is intentional.
 * @vf: one of the VFs instantiated by the specified PF
 * @vlan: the VLAN id to be set; 0 disables the ACL for this VF
 */
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
		    u16 vlan)
{
	struct fw_acl_vlan_cmd vlan_cmd;
	unsigned int enable;

	/* A VLAN id of zero disables the ACL entirely. */
	enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
	memset(&vlan_cmd, 0, sizeof(vlan_cmd));
	vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
					 F_FW_CMD_REQUEST |
					 F_FW_CMD_WRITE |
					 F_FW_CMD_EXEC |
					 V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
					 V_FW_ACL_VLAN_CMD_VFN(vf));
	vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
	/* When enabled, drop all packets that do not match the VLAN id. */
	vlan_cmd.dropnovlan_fm = (enable
				  ? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
				     F_FW_ACL_VLAN_CMD_FM)
				  : 0);
	if (enable != 0) {
		vlan_cmd.nvlan = 1;
		vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
	}

	return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
}
int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
	       const u8 *addr, bool smac)
{
	int ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(
					V_FW_CMD_LEN16(1) |
					(smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));

	/* Ask the firmware to free the entry that matches this MAC. */
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	p->valid_to_idx = cpu_to_be16(
				F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/*
		 * NOTE(review): this comparison is the inverse of the one in
		 * t4_add_mac (which treats idx >= max_mac_addr as failure).
		 * Confirm against the firmware spec that an index below
		 * mps_tcam_size really indicates a failed MAC-based free.
		 */
		if (ret < max_mac_addr)
			return -ENOMEM;
	}

	return ret;
}
FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid)); c.freemacs_to_len16 = cpu_to_be32( V_FW_CMD_LEN16(1) | (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0)); p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | V_FW_VI_MAC_CMD_IDX(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); if (ret >= max_mac_addr) return -ENOMEM; if (smt_idx) { /* Does fw supports returning smt_idx? */ if (adap->params.viid_smt_extn_support) *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid)); else { /* In T4/T5, SMT contains 256 SMAC entries * organized in 128 rows of 2 entries each. * In T6, SMT contains 256 SMAC entries in * 256 rows. */ if (chip_id(adap) <= CHELSIO_T5) *smt_idx = ((viid & M_FW_VIID_VIN) << 1); else *smt_idx = (viid & M_FW_VIID_VIN); } } } return ret; } diff --git a/sys/dev/cxgbe/firmware/t4fw_interface.h b/sys/dev/cxgbe/firmware/t4fw_interface.h index dcec2cc122f2..30a2e1760052 100644 --- a/sys/dev/cxgbe/firmware/t4fw_interface.h +++ b/sys/dev/cxgbe/firmware/t4fw_interface.h @@ -1,10084 +1,10089 @@ /*- * Copyright (c) 2012-2017 Chelsio Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _T4FW_INTERFACE_H_ #define _T4FW_INTERFACE_H_ /****************************************************************************** * R E T U R N V A L U E S ********************************/ enum fw_retval { FW_SUCCESS = 0, /* completed successfully */ FW_EPERM = 1, /* operation not permitted */ FW_ENOENT = 2, /* no such file or directory */ FW_EIO = 5, /* input/output error; hw bad */ FW_ENOEXEC = 8, /* exec format error; inv microcode */ FW_EAGAIN = 11, /* try again */ FW_ENOMEM = 12, /* out of memory */ FW_EFAULT = 14, /* bad address; fw bad */ FW_EBUSY = 16, /* resource busy */ FW_EEXIST = 17, /* file exists */ FW_ENODEV = 19, /* no such device */ FW_EINVAL = 22, /* invalid argument */ FW_ENOSPC = 28, /* no space left on device */ FW_ENOSYS = 38, /* functionality not implemented */ FW_ENODATA = 61, /* no data available */ FW_EPROTO = 71, /* protocol error */ FW_EADDRINUSE = 98, /* address already in use */ FW_EADDRNOTAVAIL = 99, /* cannot assigned requested address */ FW_ENETDOWN = 100, /* network is down */ FW_ENETUNREACH = 101, /* network is unreachable */ FW_ENOBUFS = 105, /* no buffer space available */ FW_ETIMEDOUT = 110, /* timeout */ FW_EINPROGRESS = 115, /* fw internal */ 
FW_SCSI_ABORT_REQUESTED = 128, /* */ FW_SCSI_ABORT_TIMEDOUT = 129, /* */ FW_SCSI_ABORTED = 130, /* */ FW_SCSI_CLOSE_REQUESTED = 131, /* */ FW_ERR_LINK_DOWN = 132, /* */ FW_RDEV_NOT_READY = 133, /* */ FW_ERR_RDEV_LOST = 134, /* */ FW_ERR_RDEV_LOGO = 135, /* */ FW_FCOE_NO_XCHG = 136, /* */ FW_SCSI_RSP_ERR = 137, /* */ FW_ERR_RDEV_IMPL_LOGO = 138, /* */ FW_SCSI_UNDER_FLOW_ERR = 139, /* */ FW_SCSI_OVER_FLOW_ERR = 140, /* */ FW_SCSI_DDP_ERR = 141, /* DDP error*/ FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */ FW_SCSI_IO_BLOCK = 143, /* IO is going to be blocked due to resource failure */ }; /****************************************************************************** * M E M O R Y T Y P E s ******************************/ enum fw_memtype { FW_MEMTYPE_EDC0 = 0x0, FW_MEMTYPE_EDC1 = 0x1, FW_MEMTYPE_EXTMEM = 0x2, FW_MEMTYPE_FLASH = 0x4, FW_MEMTYPE_INTERNAL = 0x5, FW_MEMTYPE_EXTMEM1 = 0x6, FW_MEMTYPE_HMA = 0x7, }; /****************************************************************************** * W O R K R E Q U E S T s ********************************/ enum fw_wr_opcodes { FW_FRAG_WR = 0x1d, FW_FILTER_WR = 0x02, FW_ULPTX_WR = 0x04, FW_TP_WR = 0x05, FW_ETH_TX_PKT_WR = 0x08, FW_ETH_TX_PKT2_WR = 0x44, FW_ETH_TX_PKTS_WR = 0x09, FW_ETH_TX_PKTS2_WR = 0x78, FW_ETH_TX_EO_WR = 0x1c, FW_EQ_FLUSH_WR = 0x1b, FW_OFLD_CONNECTION_WR = 0x2f, FW_FLOWC_WR = 0x0a, FW_OFLD_TX_DATA_WR = 0x0b, FW_CMD_WR = 0x10, FW_ETH_TX_PKT_VM_WR = 0x11, FW_ETH_TX_PKTS_VM_WR = 0x12, FW_RI_RES_WR = 0x0c, FW_RI_RDMA_WRITE_WR = 0x14, FW_RI_SEND_WR = 0x15, FW_RI_RDMA_READ_WR = 0x16, FW_RI_RECV_WR = 0x17, FW_RI_BIND_MW_WR = 0x18, FW_RI_FR_NSMR_WR = 0x19, FW_RI_FR_NSMR_TPTE_WR = 0x20, FW_RI_RDMA_WRITE_CMPL_WR = 0x21, FW_RI_INV_LSTAG_WR = 0x1a, FW_RI_SEND_IMMEDIATE_WR = 0x15, FW_RI_ATOMIC_WR = 0x16, FW_RI_WR = 0x0d, FW_CHNET_IFCONF_WR = 0x6b, FW_RDEV_WR = 0x38, FW_FOISCSI_NODE_WR = 0x60, FW_FOISCSI_CTRL_WR = 0x6a, FW_FOISCSI_CHAP_WR = 0x6c, FW_FCOE_ELS_CT_WR = 0x30, FW_SCSI_WRITE_WR = 0x31, FW_SCSI_READ_WR = 
0x32, FW_SCSI_CMD_WR = 0x33, FW_SCSI_ABRT_CLS_WR = 0x34, FW_SCSI_TGT_ACC_WR = 0x35, FW_SCSI_TGT_XMIT_WR = 0x36, FW_SCSI_TGT_RSP_WR = 0x37, FW_POFCOE_TCB_WR = 0x42, FW_POFCOE_ULPTX_WR = 0x43, FW_ISCSI_TX_DATA_WR = 0x45, FW_PTP_TX_PKT_WR = 0x46, FW_TLSTX_DATA_WR = 0x68, FW_TLS_TUNNEL_OFLD_WR = 0x69, FW_CRYPTO_LOOKASIDE_WR = 0x6d, FW_COISCSI_TGT_WR = 0x70, FW_COISCSI_TGT_CONN_WR = 0x71, FW_COISCSI_TGT_XMIT_WR = 0x72, FW_COISCSI_STATS_WR = 0x73, FW_ISNS_WR = 0x75, FW_ISNS_XMIT_WR = 0x76, FW_FILTER2_WR = 0x77, FW_LASTC2E_WR = 0x80 }; /* * Generic work request header flit0 */ struct fw_wr_hdr { __be32 hi; __be32 lo; }; /* work request opcode (hi) */ #define S_FW_WR_OP 24 #define M_FW_WR_OP 0xff #define V_FW_WR_OP(x) ((x) << S_FW_WR_OP) #define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP) /* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */ #define S_FW_WR_ATOMIC 23 #define M_FW_WR_ATOMIC 0x1 #define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC) #define G_FW_WR_ATOMIC(x) \ (((x) >> S_FW_WR_ATOMIC) & M_FW_WR_ATOMIC) #define F_FW_WR_ATOMIC V_FW_WR_ATOMIC(1U) /* flush flag (hi) - firmware flushes flushable work request buffered * in the flow context. 
*/ #define S_FW_WR_FLUSH 22 #define M_FW_WR_FLUSH 0x1 #define V_FW_WR_FLUSH(x) ((x) << S_FW_WR_FLUSH) #define G_FW_WR_FLUSH(x) \ (((x) >> S_FW_WR_FLUSH) & M_FW_WR_FLUSH) #define F_FW_WR_FLUSH V_FW_WR_FLUSH(1U) /* completion flag (hi) - firmware generates a cpl_fw6_ack */ #define S_FW_WR_COMPL 21 #define M_FW_WR_COMPL 0x1 #define V_FW_WR_COMPL(x) ((x) << S_FW_WR_COMPL) #define G_FW_WR_COMPL(x) \ (((x) >> S_FW_WR_COMPL) & M_FW_WR_COMPL) #define F_FW_WR_COMPL V_FW_WR_COMPL(1U) /* work request immediate data lengh (hi) */ #define S_FW_WR_IMMDLEN 0 #define M_FW_WR_IMMDLEN 0xff #define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN) #define G_FW_WR_IMMDLEN(x) \ (((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN) /* egress queue status update to associated ingress queue entry (lo) */ #define S_FW_WR_EQUIQ 31 #define M_FW_WR_EQUIQ 0x1 #define V_FW_WR_EQUIQ(x) ((x) << S_FW_WR_EQUIQ) #define G_FW_WR_EQUIQ(x) (((x) >> S_FW_WR_EQUIQ) & M_FW_WR_EQUIQ) #define F_FW_WR_EQUIQ V_FW_WR_EQUIQ(1U) /* egress queue status update to egress queue status entry (lo) */ #define S_FW_WR_EQUEQ 30 #define M_FW_WR_EQUEQ 0x1 #define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ) #define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ) #define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U) /* flow context identifier (lo) */ #define S_FW_WR_FLOWID 8 #define M_FW_WR_FLOWID 0xfffff #define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID) #define G_FW_WR_FLOWID(x) (((x) >> S_FW_WR_FLOWID) & M_FW_WR_FLOWID) /* length in units of 16-bytes (lo) */ #define S_FW_WR_LEN16 0 #define M_FW_WR_LEN16 0xff #define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16) #define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16) struct fw_frag_wr { __be32 op_to_fragoff16; __be32 flowid_len16; __be64 r4; }; #define S_FW_FRAG_WR_EOF 15 #define M_FW_FRAG_WR_EOF 0x1 #define V_FW_FRAG_WR_EOF(x) ((x) << S_FW_FRAG_WR_EOF) #define G_FW_FRAG_WR_EOF(x) (((x) >> S_FW_FRAG_WR_EOF) & M_FW_FRAG_WR_EOF) #define F_FW_FRAG_WR_EOF V_FW_FRAG_WR_EOF(1U) #define 
S_FW_FRAG_WR_FRAGOFF16 8 #define M_FW_FRAG_WR_FRAGOFF16 0x7f #define V_FW_FRAG_WR_FRAGOFF16(x) ((x) << S_FW_FRAG_WR_FRAGOFF16) #define G_FW_FRAG_WR_FRAGOFF16(x) \ (((x) >> S_FW_FRAG_WR_FRAGOFF16) & M_FW_FRAG_WR_FRAGOFF16) /* valid filter configurations for compressed tuple * Encodings: TPL - Compressed TUPLE for filter in addition to 4-tuple * FR - FRAGMENT, FC - FCoE, MT - MPS MATCH TYPE, M - MPS MATCH, * E - Ethertype, P - Port, PR - Protocol, T - TOS, IV - Inner VLAN, * OV - Outer VLAN/VNIC_ID, */ #define HW_TPL_FR_MT_M_E_P_FC 0x3C3 #define HW_TPL_FR_MT_M_PR_T_FC 0x3B3 #define HW_TPL_FR_MT_M_IV_P_FC 0x38B #define HW_TPL_FR_MT_M_OV_P_FC 0x387 #define HW_TPL_FR_MT_E_PR_T 0x370 #define HW_TPL_FR_MT_E_PR_P_FC 0X363 #define HW_TPL_FR_MT_E_T_P_FC 0X353 #define HW_TPL_FR_MT_PR_IV_P_FC 0X32B #define HW_TPL_FR_MT_PR_OV_P_FC 0X327 #define HW_TPL_FR_MT_T_IV_P_FC 0X31B #define HW_TPL_FR_MT_T_OV_P_FC 0X317 #define HW_TPL_FR_M_E_PR_FC 0X2E1 #define HW_TPL_FR_M_E_T_FC 0X2D1 #define HW_TPL_FR_M_PR_IV_FC 0X2A9 #define HW_TPL_FR_M_PR_OV_FC 0X2A5 #define HW_TPL_FR_M_T_IV_FC 0X299 #define HW_TPL_FR_M_T_OV_FC 0X295 #define HW_TPL_FR_E_PR_T_P 0X272 #define HW_TPL_FR_E_PR_T_FC 0X271 #define HW_TPL_FR_E_IV_FC 0X249 #define HW_TPL_FR_E_OV_FC 0X245 #define HW_TPL_FR_PR_T_IV_FC 0X239 #define HW_TPL_FR_PR_T_OV_FC 0X235 #define HW_TPL_FR_IV_OV_FC 0X20D #define HW_TPL_MT_M_E_PR 0X1E0 #define HW_TPL_MT_M_E_T 0X1D0 #define HW_TPL_MT_E_PR_T_FC 0X171 #define HW_TPL_MT_E_IV 0X148 #define HW_TPL_MT_E_OV 0X144 #define HW_TPL_MT_PR_T_IV 0X138 #define HW_TPL_MT_PR_T_OV 0X134 #define HW_TPL_M_E_PR_P 0X0E2 #define HW_TPL_M_E_T_P 0X0D2 #define HW_TPL_E_PR_T_P_FC 0X073 #define HW_TPL_E_IV_P 0X04A #define HW_TPL_E_OV_P 0X046 #define HW_TPL_PR_T_IV_P 0X03A #define HW_TPL_PR_T_OV_P 0X036 /* filter wr reply code in cookie in CPL_SET_TCB_RPL */ enum fw_filter_wr_cookie { FW_FILTER_WR_SUCCESS, FW_FILTER_WR_FLT_ADDED, FW_FILTER_WR_FLT_DELETED, FW_FILTER_WR_SMT_TBL_FULL, FW_FILTER_WR_EINVAL, }; enum 
fw_filter_wr_nat_mode { FW_FILTER_WR_NATMODE_NONE = 0, FW_FILTER_WR_NATMODE_DIP , FW_FILTER_WR_NATMODE_DIPDP, FW_FILTER_WR_NATMODE_DIPDPSIP, FW_FILTER_WR_NATMODE_DIPDPSP, FW_FILTER_WR_NATMODE_SIPSP, FW_FILTER_WR_NATMODE_DIPSIPSP, FW_FILTER_WR_NATMODE_FOURTUPLE, }; struct fw_filter_wr { __be32 op_pkd; __be32 len16_pkd; __be64 r3; __be32 tid_to_iq; __be32 del_filter_to_l2tix; __be16 ethtype; __be16 ethtypem; __u8 frag_to_ovlan_vldm; __u8 smac_sel; __be16 rx_chan_rx_rpl_iq; __be32 maci_to_matchtypem; __u8 ptcl; __u8 ptclm; __u8 ttyp; __u8 ttypm; __be16 ivlan; __be16 ivlanm; __be16 ovlan; __be16 ovlanm; __u8 lip[16]; __u8 lipm[16]; __u8 fip[16]; __u8 fipm[16]; __be16 lp; __be16 lpm; __be16 fp; __be16 fpm; __be16 r7; __u8 sma[6]; }; struct fw_filter2_wr { __be32 op_pkd; __be32 len16_pkd; __be64 r3; __be32 tid_to_iq; __be32 del_filter_to_l2tix; __be16 ethtype; __be16 ethtypem; __u8 frag_to_ovlan_vldm; __u8 smac_sel; __be16 rx_chan_rx_rpl_iq; __be32 maci_to_matchtypem; __u8 ptcl; __u8 ptclm; __u8 ttyp; __u8 ttypm; __be16 ivlan; __be16 ivlanm; __be16 ovlan; __be16 ovlanm; __u8 lip[16]; __u8 lipm[16]; __u8 fip[16]; __u8 fipm[16]; __be16 lp; __be16 lpm; __be16 fp; __be16 fpm; __be16 r7; __u8 sma[6]; __be16 r8; __u8 filter_type_swapmac; __u8 natmode_to_ulp_type; __be16 newlport; __be16 newfport; __u8 newlip[16]; __u8 newfip[16]; __be32 natseqcheck; __be32 r9; __be64 r10; __be64 r11; __be64 r12; __be64 r13; }; #define S_FW_FILTER_WR_TID 12 #define M_FW_FILTER_WR_TID 0xfffff #define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID) #define G_FW_FILTER_WR_TID(x) \ (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID) #define S_FW_FILTER_WR_RQTYPE 11 #define M_FW_FILTER_WR_RQTYPE 0x1 #define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE) #define G_FW_FILTER_WR_RQTYPE(x) \ (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE) #define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U) #define S_FW_FILTER_WR_NOREPLY 10 #define M_FW_FILTER_WR_NOREPLY 0x1 #define 
V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY) #define G_FW_FILTER_WR_NOREPLY(x) \ (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY) #define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U) #define S_FW_FILTER_WR_IQ 0 #define M_FW_FILTER_WR_IQ 0x3ff #define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ) #define G_FW_FILTER_WR_IQ(x) \ (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ) #define S_FW_FILTER_WR_DEL_FILTER 31 #define M_FW_FILTER_WR_DEL_FILTER 0x1 #define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER) #define G_FW_FILTER_WR_DEL_FILTER(x) \ (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER) #define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U) #define S_FW_FILTER2_WR_DROP_ENCAP 30 #define M_FW_FILTER2_WR_DROP_ENCAP 0x1 #define V_FW_FILTER2_WR_DROP_ENCAP(x) ((x) << S_FW_FILTER2_WR_DROP_ENCAP) #define G_FW_FILTER2_WR_DROP_ENCAP(x) \ (((x) >> S_FW_FILTER2_WR_DROP_ENCAP) & M_FW_FILTER2_WR_DROP_ENCAP) #define F_FW_FILTER2_WR_DROP_ENCAP V_FW_FILTER2_WR_DROP_ENCAP(1U) #define S_FW_FILTER2_WR_TX_LOOP 29 #define M_FW_FILTER2_WR_TX_LOOP 0x1 #define V_FW_FILTER2_WR_TX_LOOP(x) ((x) << S_FW_FILTER2_WR_TX_LOOP) #define G_FW_FILTER2_WR_TX_LOOP(x) \ (((x) >> S_FW_FILTER2_WR_TX_LOOP) & M_FW_FILTER2_WR_TX_LOOP) #define F_FW_FILTER2_WR_TX_LOOP V_FW_FILTER2_WR_TX_LOOP(1U) #define S_FW_FILTER_WR_RPTTID 25 #define M_FW_FILTER_WR_RPTTID 0x1 #define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID) #define G_FW_FILTER_WR_RPTTID(x) \ (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID) #define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U) #define S_FW_FILTER_WR_DROP 24 #define M_FW_FILTER_WR_DROP 0x1 #define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP) #define G_FW_FILTER_WR_DROP(x) \ (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP) #define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U) #define S_FW_FILTER_WR_DIRSTEER 23 #define M_FW_FILTER_WR_DIRSTEER 0x1 #define V_FW_FILTER_WR_DIRSTEER(x) ((x) << 
S_FW_FILTER_WR_DIRSTEER) #define G_FW_FILTER_WR_DIRSTEER(x) \ (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER) #define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U) #define S_FW_FILTER_WR_MASKHASH 22 #define M_FW_FILTER_WR_MASKHASH 0x1 #define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH) #define G_FW_FILTER_WR_MASKHASH(x) \ (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH) #define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U) #define S_FW_FILTER_WR_DIRSTEERHASH 21 #define M_FW_FILTER_WR_DIRSTEERHASH 0x1 #define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH) #define G_FW_FILTER_WR_DIRSTEERHASH(x) \ (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH) #define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U) #define S_FW_FILTER_WR_LPBK 20 #define M_FW_FILTER_WR_LPBK 0x1 #define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK) #define G_FW_FILTER_WR_LPBK(x) \ (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK) #define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U) #define S_FW_FILTER_WR_DMAC 19 #define M_FW_FILTER_WR_DMAC 0x1 #define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC) #define G_FW_FILTER_WR_DMAC(x) \ (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC) #define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U) #define S_FW_FILTER_WR_SMAC 18 #define M_FW_FILTER_WR_SMAC 0x1 #define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC) #define G_FW_FILTER_WR_SMAC(x) \ (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC) #define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U) #define S_FW_FILTER_WR_INSVLAN 17 #define M_FW_FILTER_WR_INSVLAN 0x1 #define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN) #define G_FW_FILTER_WR_INSVLAN(x) \ (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN) #define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U) #define S_FW_FILTER_WR_RMVLAN 16 #define M_FW_FILTER_WR_RMVLAN 0x1 #define V_FW_FILTER_WR_RMVLAN(x) ((x) << 
S_FW_FILTER_WR_RMVLAN) #define G_FW_FILTER_WR_RMVLAN(x) \ (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN) #define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U) #define S_FW_FILTER_WR_HITCNTS 15 #define M_FW_FILTER_WR_HITCNTS 0x1 #define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS) #define G_FW_FILTER_WR_HITCNTS(x) \ (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS) #define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U) #define S_FW_FILTER_WR_TXCHAN 13 #define M_FW_FILTER_WR_TXCHAN 0x3 #define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN) #define G_FW_FILTER_WR_TXCHAN(x) \ (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN) #define S_FW_FILTER_WR_PRIO 12 #define M_FW_FILTER_WR_PRIO 0x1 #define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO) #define G_FW_FILTER_WR_PRIO(x) \ (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO) #define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U) #define S_FW_FILTER_WR_L2TIX 0 #define M_FW_FILTER_WR_L2TIX 0xfff #define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX) #define G_FW_FILTER_WR_L2TIX(x) \ (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX) #define S_FW_FILTER_WR_FRAG 7 #define M_FW_FILTER_WR_FRAG 0x1 #define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG) #define G_FW_FILTER_WR_FRAG(x) \ (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG) #define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U) #define S_FW_FILTER_WR_FRAGM 6 #define M_FW_FILTER_WR_FRAGM 0x1 #define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM) #define G_FW_FILTER_WR_FRAGM(x) \ (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM) #define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U) #define S_FW_FILTER_WR_IVLAN_VLD 5 #define M_FW_FILTER_WR_IVLAN_VLD 0x1 #define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD) #define G_FW_FILTER_WR_IVLAN_VLD(x) \ (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD) #define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U) 
/*
 * FW_FILTER_WR / FW_FILTER2_WR bitfield accessors (continued).
 * Naming convention used throughout this firmware interface header:
 *   S_<FIELD>    bit offset of <FIELD> within its containing word
 *   M_<FIELD>    right-justified bit mask for <FIELD>
 *   V_<FIELD>(x) shift x into <FIELD>'s position (compose a word)
 *   G_<FIELD>(x) extract <FIELD> from a word
 *   F_<FIELD>    single-bit flag constant, i.e. V_<FIELD>(1U)
 */
#define S_FW_FILTER_WR_OVLAN_VLD 4
#define M_FW_FILTER_WR_OVLAN_VLD 0x1
#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
#define G_FW_FILTER_WR_OVLAN_VLD(x) \
	(((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)

/* NOTE(review): the *_VLDM names appear to be the match/mask counterparts of
 * the *_VLD bits — semantics not visible here, confirm against firmware docs. */
#define S_FW_FILTER_WR_IVLAN_VLDM 3
#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
	(((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)

#define S_FW_FILTER_WR_OVLAN_VLDM 2
#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
	(((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)

#define S_FW_FILTER_WR_RX_CHAN 15
#define M_FW_FILTER_WR_RX_CHAN 0x1
#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
#define G_FW_FILTER_WR_RX_CHAN(x) \
	(((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)

/* ingress queue id for filter replies (10-bit field, no flag form) */
#define S_FW_FILTER_WR_RX_RPL_IQ 0
#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
	(((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)

/* FW_FILTER2_WR-only fields (extended filter work request). */
#define S_FW_FILTER2_WR_FILTER_TYPE 1
#define M_FW_FILTER2_WR_FILTER_TYPE 0x1
#define V_FW_FILTER2_WR_FILTER_TYPE(x) ((x) << S_FW_FILTER2_WR_FILTER_TYPE)
#define G_FW_FILTER2_WR_FILTER_TYPE(x) \
	(((x) >> S_FW_FILTER2_WR_FILTER_TYPE) & M_FW_FILTER2_WR_FILTER_TYPE)
#define F_FW_FILTER2_WR_FILTER_TYPE V_FW_FILTER2_WR_FILTER_TYPE(1U)

#define S_FW_FILTER2_WR_SWAPMAC 0
#define M_FW_FILTER2_WR_SWAPMAC 0x1
#define V_FW_FILTER2_WR_SWAPMAC(x) ((x) << S_FW_FILTER2_WR_SWAPMAC)
#define G_FW_FILTER2_WR_SWAPMAC(x) \
	(((x) >> S_FW_FILTER2_WR_SWAPMAC) & M_FW_FILTER2_WR_SWAPMAC)
#define F_FW_FILTER2_WR_SWAPMAC V_FW_FILTER2_WR_SWAPMAC(1U)

/* NAT mode (3-bit enumeration; values defined by the firmware). */
#define S_FW_FILTER2_WR_NATMODE 5
#define M_FW_FILTER2_WR_NATMODE 0x7
#define V_FW_FILTER2_WR_NATMODE(x) ((x) << S_FW_FILTER2_WR_NATMODE)
#define G_FW_FILTER2_WR_NATMODE(x) \
	(((x) >> S_FW_FILTER2_WR_NATMODE) & M_FW_FILTER2_WR_NATMODE)

#define S_FW_FILTER2_WR_NATFLAGCHECK 4
#define M_FW_FILTER2_WR_NATFLAGCHECK 0x1
#define V_FW_FILTER2_WR_NATFLAGCHECK(x) ((x) << S_FW_FILTER2_WR_NATFLAGCHECK)
#define G_FW_FILTER2_WR_NATFLAGCHECK(x) \
	(((x) >> S_FW_FILTER2_WR_NATFLAGCHECK) & M_FW_FILTER2_WR_NATFLAGCHECK)
#define F_FW_FILTER2_WR_NATFLAGCHECK V_FW_FILTER2_WR_NATFLAGCHECK(1U)

#define S_FW_FILTER2_WR_ULP_TYPE 0
#define M_FW_FILTER2_WR_ULP_TYPE 0xf
#define V_FW_FILTER2_WR_ULP_TYPE(x) ((x) << S_FW_FILTER2_WR_ULP_TYPE)
#define G_FW_FILTER2_WR_ULP_TYPE(x) \
	(((x) >> S_FW_FILTER2_WR_ULP_TYPE) & M_FW_FILTER2_WR_ULP_TYPE)

/* MAC index (MACI) and its mask (MACIM) — 9-bit fields. */
#define S_FW_FILTER_WR_MACI 23
#define M_FW_FILTER_WR_MACI 0x1ff
#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
#define G_FW_FILTER_WR_MACI(x) \
	(((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)

#define S_FW_FILTER_WR_MACIM 14
#define M_FW_FILTER_WR_MACIM 0x1ff
#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
#define G_FW_FILTER_WR_MACIM(x) \
	(((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)

#define S_FW_FILTER_WR_FCOE 13
#define M_FW_FILTER_WR_FCOE 0x1
#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
#define G_FW_FILTER_WR_FCOE(x) \
	(((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)

#define S_FW_FILTER_WR_FCOEM 12
#define M_FW_FILTER_WR_FCOEM 0x1
#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
#define G_FW_FILTER_WR_FCOEM(x) \
	(((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)

#define S_FW_FILTER_WR_PORT 9
#define M_FW_FILTER_WR_PORT 0x7
#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
#define G_FW_FILTER_WR_PORT(x) \
	(((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)

#define S_FW_FILTER_WR_PORTM 6
#define M_FW_FILTER_WR_PORTM 0x7
#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
#define G_FW_FILTER_WR_PORTM(x) \
	(((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)

#define S_FW_FILTER_WR_MATCHTYPE 3
#define M_FW_FILTER_WR_MATCHTYPE 0x7
#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
#define G_FW_FILTER_WR_MATCHTYPE(x) \
	(((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)

#define S_FW_FILTER_WR_MATCHTYPEM 0
#define M_FW_FILTER_WR_MATCHTYPEM 0x7
#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
#define G_FW_FILTER_WR_MATCHTYPEM(x) \
	(((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)

/* Generic ULP_TX work request header (big-endian wire format). */
struct fw_ulptx_wr {
	__be32 op_to_compl;
	__be32 flowid_len16;
	__u64 cookie;
};

/* flag for packet type - control packet (0), data packet (1) */
#define S_FW_ULPTX_WR_DATA 28
#define M_FW_ULPTX_WR_DATA 0x1
#define V_FW_ULPTX_WR_DATA(x) ((x) << S_FW_ULPTX_WR_DATA)
#define G_FW_ULPTX_WR_DATA(x) \
	(((x) >> S_FW_ULPTX_WR_DATA) & M_FW_ULPTX_WR_DATA)
#define F_FW_ULPTX_WR_DATA V_FW_ULPTX_WR_DATA(1U)

/* TP (transport processor) work request header. */
struct fw_tp_wr {
	__be32 op_to_immdlen;
	__be32 flowid_len16;
	__u64 cookie;
};

/* Ethernet Tx packet work request; payload/CPLs follow the header. */
struct fw_eth_tx_pkt_wr {
	__be32 op_immdlen;
	__be32 equiq_to_len16;
	__be64 r3;
};

/* immediate-data length carried in op_immdlen (9-bit field) */
#define S_FW_ETH_TX_PKT_WR_IMMDLEN 0
#define M_FW_ETH_TX_PKT_WR_IMMDLEN 0x1ff
#define V_FW_ETH_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT_WR_IMMDLEN)
#define G_FW_ETH_TX_PKT_WR_IMMDLEN(x) \
	(((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN)

/* Ethernet Tx packet WR variant with checksum/VLAN controls in the header. */
struct fw_eth_tx_pkt2_wr {
	__be32 op_immdlen;
	__be32 equiq_to_len16;
	__be32 r3;
	__be32 L4ChkDisable_to_IpHdrLen;
};

#define S_FW_ETH_TX_PKT2_WR_IMMDLEN 0
#define M_FW_ETH_TX_PKT2_WR_IMMDLEN 0x1ff
#define V_FW_ETH_TX_PKT2_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT2_WR_IMMDLEN)
#define G_FW_ETH_TX_PKT2_WR_IMMDLEN(x) \
	(((x) >> S_FW_ETH_TX_PKT2_WR_IMMDLEN) & M_FW_ETH_TX_PKT2_WR_IMMDLEN)

/* Fields of L4ChkDisable_to_IpHdrLen. */
#define S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE 31
#define M_FW_ETH_TX_PKT2_WR_L4CHKDISABLE 0x1
#define V_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(x) \
	((x) << S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE)
#define G_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(x) \
	(((x) >> S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE) & \
	 M_FW_ETH_TX_PKT2_WR_L4CHKDISABLE)
#define F_FW_ETH_TX_PKT2_WR_L4CHKDISABLE \
	V_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(1U)

#define S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE 30
#define M_FW_ETH_TX_PKT2_WR_L3CHKDISABLE 0x1
#define V_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(x) \
	((x) << S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE)
#define G_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(x) \
	(((x) >> S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE) & \
	 M_FW_ETH_TX_PKT2_WR_L3CHKDISABLE)
#define F_FW_ETH_TX_PKT2_WR_L3CHKDISABLE \
	V_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(1U)

#define S_FW_ETH_TX_PKT2_WR_IVLAN 28
#define M_FW_ETH_TX_PKT2_WR_IVLAN 0x1
#define V_FW_ETH_TX_PKT2_WR_IVLAN(x) ((x) << S_FW_ETH_TX_PKT2_WR_IVLAN)
#define G_FW_ETH_TX_PKT2_WR_IVLAN(x) \
	(((x) >> S_FW_ETH_TX_PKT2_WR_IVLAN) & M_FW_ETH_TX_PKT2_WR_IVLAN)
#define F_FW_ETH_TX_PKT2_WR_IVLAN V_FW_ETH_TX_PKT2_WR_IVLAN(1U)

#define S_FW_ETH_TX_PKT2_WR_IVLANTAG 12
#define M_FW_ETH_TX_PKT2_WR_IVLANTAG 0xffff
#define V_FW_ETH_TX_PKT2_WR_IVLANTAG(x) ((x) << S_FW_ETH_TX_PKT2_WR_IVLANTAG)
#define G_FW_ETH_TX_PKT2_WR_IVLANTAG(x) \
	(((x) >> S_FW_ETH_TX_PKT2_WR_IVLANTAG) & M_FW_ETH_TX_PKT2_WR_IVLANTAG)

#define S_FW_ETH_TX_PKT2_WR_CHKTYPE 8
#define M_FW_ETH_TX_PKT2_WR_CHKTYPE 0xf
#define V_FW_ETH_TX_PKT2_WR_CHKTYPE(x) ((x) << S_FW_ETH_TX_PKT2_WR_CHKTYPE)
#define G_FW_ETH_TX_PKT2_WR_CHKTYPE(x) \
	(((x) >> S_FW_ETH_TX_PKT2_WR_CHKTYPE) & M_FW_ETH_TX_PKT2_WR_CHKTYPE)

#define S_FW_ETH_TX_PKT2_WR_IPHDRLEN 0
#define M_FW_ETH_TX_PKT2_WR_IPHDRLEN 0xff
#define V_FW_ETH_TX_PKT2_WR_IPHDRLEN(x) ((x) << S_FW_ETH_TX_PKT2_WR_IPHDRLEN)
#define G_FW_ETH_TX_PKT2_WR_IPHDRLEN(x) \
	(((x) >> S_FW_ETH_TX_PKT2_WR_IPHDRLEN) & M_FW_ETH_TX_PKT2_WR_IPHDRLEN)

/* Coalesced Ethernet Tx work request (multiple packets per WR). */
struct fw_eth_tx_pkts_wr {
	__be32 op_pkd;
	__be32 equiq_to_len16;
	__be32 r3;
	__be16 plen;
	__u8 npkt;	/* number of packets coalesced into this WR */
	__u8 type;
};

/* immediate-data length for the PTP Tx packet WR below (9-bit field) */
#define S_FW_PTP_TX_PKT_WR_IMMDLEN 0
#define M_FW_PTP_TX_PKT_WR_IMMDLEN 0x1ff
#define V_FW_PTP_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_PTP_TX_PKT_WR_IMMDLEN)
#define G_FW_PTP_TX_PKT_WR_IMMDLEN(x) \
	(((x) >> S_FW_PTP_TX_PKT_WR_IMMDLEN) & M_FW_PTP_TX_PKT_WR_IMMDLEN)

/* Ethernet Tx work request for PTP (timestamped) packets. */
struct fw_eth_tx_pkt_ptp_wr {
	__be32 op_immdlen;
	__be32 equiq_to_len16;
	__be64 r3;
};

/* Segmentation-offload type carried in fw_eth_tx_eo_wr's union below. */
enum fw_eth_tx_eo_type {
	FW_ETH_TX_EO_TYPE_UDPSEG,
	FW_ETH_TX_EO_TYPE_TCPSEG,
	FW_ETH_TX_EO_TYPE_NVGRESEG,
	FW_ETH_TX_EO_TYPE_VXLANSEG,
	FW_ETH_TX_EO_TYPE_GENEVESEG,
};

/*
 * Ethernet Tx offload work request.  The per-type union member describes
 * header layout/MSS for the chosen segmentation offload.
 */
struct fw_eth_tx_eo_wr {
	__be32 op_immdlen;
	__be32 equiq_to_len16;
	__be64 r3;
	union fw_eth_tx_eo {
		struct fw_eth_tx_eo_udpseg {
			__u8 type;
			__u8 ethlen;
			__be16 iplen;
			__u8 udplen;
			__u8 rtplen;
			__be16 r4;
			__be16 mss;
			__be16 schedpktsize;
			__be32 plen;
		} udpseg;
		struct fw_eth_tx_eo_tcpseg {
			__u8 type;
			__u8 ethlen;
			__be16 iplen;
			__u8 tcplen;
			__u8 tsclk_tsoff;
			__be16 r4;
			__be16 mss;
			__be16 r5;
			__be32 plen;
		} tcpseg;
		struct fw_eth_tx_eo_nvgreseg {
			__u8 type;
			__u8 iphdroffout;
			__be16 grehdroff;
			__be16 iphdroffin;
			__be16 tcphdroffin;
			__be16 mss;
			__be16 r4;
			__be32 plen;
		} nvgreseg;
		struct fw_eth_tx_eo_vxlanseg {
			__u8 type;
			__u8 iphdroffout;
			__be16 vxlanhdroff;
			__be16 iphdroffin;
			__be16 tcphdroffin;
			__be16 mss;
			__be16 r4;
			__be32 plen;
		} vxlanseg;
		struct fw_eth_tx_eo_geneveseg {
			__u8 type;
			__u8 iphdroffout;
			__be16 genevehdroff;
			__be16 iphdroffin;
			__be16 tcphdroffin;
			__be16 mss;
			__be16 r4;
			__be32 plen;
		} geneveseg;
	} u;
};

#define S_FW_ETH_TX_EO_WR_IMMDLEN 0
#define M_FW_ETH_TX_EO_WR_IMMDLEN 0x1ff
#define V_FW_ETH_TX_EO_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_EO_WR_IMMDLEN)
#define G_FW_ETH_TX_EO_WR_IMMDLEN(x) \
	(((x) >> S_FW_ETH_TX_EO_WR_IMMDLEN) & M_FW_ETH_TX_EO_WR_IMMDLEN)

/* tsclk_tsoff sub-fields of the tcpseg variant */
#define S_FW_ETH_TX_EO_WR_TSCLK 6
#define M_FW_ETH_TX_EO_WR_TSCLK 0x3
#define V_FW_ETH_TX_EO_WR_TSCLK(x) ((x) << S_FW_ETH_TX_EO_WR_TSCLK)
#define G_FW_ETH_TX_EO_WR_TSCLK(x) \
	(((x) >> S_FW_ETH_TX_EO_WR_TSCLK) & M_FW_ETH_TX_EO_WR_TSCLK)

#define S_FW_ETH_TX_EO_WR_TSOFF 0
#define M_FW_ETH_TX_EO_WR_TSOFF 0x3f
#define V_FW_ETH_TX_EO_WR_TSOFF(x) ((x) << S_FW_ETH_TX_EO_WR_TSOFF)
#define G_FW_ETH_TX_EO_WR_TSOFF(x) \
	(((x) >> S_FW_ETH_TX_EO_WR_TSOFF) & M_FW_ETH_TX_EO_WR_TSOFF)

/* Egress-queue flush work request. */
struct fw_eq_flush_wr {
	__u8 opcode;
	__u8 r1[3];
	__be32 equiq_to_len16;
	__be64 r3;
};

/*
 * Offloaded-connection work request: carries the LE (lookup entry) tuple
 * and a TCB (TCP control block) image for the connection being installed.
 */
struct fw_ofld_connection_wr {
	__be32 op_compl;
	__be32 len16_pkd;
	__u64 cookie;
	__be64 r2;
	__be64 r3;
	struct fw_ofld_connection_le {
		__be32 version_cpl;
		__be32 filter;
		__be32 r1;
		__be16 lport;	/* local port */
		__be16 pport;	/* peer port */
		union fw_ofld_connection_leip {
			struct fw_ofld_connection_le_ipv4 {
				__be32 pip;	/* peer IP */
				__be32 lip;	/* local IP */
				__be64 r0;
				__be64 r1;
				__be64 r2;
			} ipv4;
			struct fw_ofld_connection_le_ipv6 {
				__be64 pip_hi;
				__be64 pip_lo;
				__be64 lip_hi;
				__be64 lip_lo;
			} ipv6;
		} u;
	} le;
	struct fw_ofld_connection_tcb {
		__be32 t_state_to_astid;
		__be16 cplrxdataack_cplpassacceptrpl;
		__be16 rcv_adv;
		__be32 rcv_nxt;
		__be32 tx_max;
		__be64 opt0;
		__be32 opt2;
		__be32 r1;
		__be64 r2;
		__be64 r3;
	} tcb;
};

#define S_FW_OFLD_CONNECTION_WR_VERSION 31
#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1
#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \
	((x) << S_FW_OFLD_CONNECTION_WR_VERSION)
#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \
	(((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \
	 M_FW_OFLD_CONNECTION_WR_VERSION)
#define F_FW_OFLD_CONNECTION_WR_VERSION V_FW_OFLD_CONNECTION_WR_VERSION(1U)

#define S_FW_OFLD_CONNECTION_WR_CPL 30
#define M_FW_OFLD_CONNECTION_WR_CPL 0x1
#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL)
#define G_FW_OFLD_CONNECTION_WR_CPL(x) \
	(((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL)
#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U)

/* Fields of tcb.t_state_to_astid. */
#define S_FW_OFLD_CONNECTION_WR_T_STATE 28
#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf
#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
	((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)
#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \
	(((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \
	 M_FW_OFLD_CONNECTION_WR_T_STATE)

#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf
#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
	((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)
#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
	(((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \
	 M_FW_OFLD_CONNECTION_WR_RCV_SCALE)

#define S_FW_OFLD_CONNECTION_WR_ASTID 0
#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff
#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
	((x) << S_FW_OFLD_CONNECTION_WR_ASTID)
#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \
	(((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID)

/* Fields of tcb.cplrxdataack_cplpassacceptrpl. */
#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15
#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1
#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
	((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
	(((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \
	 M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \
	V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U)

#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14
#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1
#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
	((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
	(((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \
	 M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \
	V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U)

/* TCP state reported to the firmware via the FLOWC TCPSTATE mnemonic. */
enum fw_flowc_mnem_tcpstate {
	FW_FLOWC_MNEM_TCPSTATE_CLOSED = 0, /* illegal */
	FW_FLOWC_MNEM_TCPSTATE_LISTEN = 1, /* illegal */
	FW_FLOWC_MNEM_TCPSTATE_SYNSENT = 2, /* illegal */
	FW_FLOWC_MNEM_TCPSTATE_SYNRECEIVED = 3, /* illegal */
	FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED = 4, /* default */
	FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT = 5, /* got peer close already */
	FW_FLOWC_MNEM_TCPSTATE_FINWAIT1 = 6, /* haven't gotten ACK for FIN and
					      * will resend FIN - equiv ESTAB */
	FW_FLOWC_MNEM_TCPSTATE_CLOSING = 7, /* haven't gotten ACK for FIN and
					     * will resend FIN but have
					     * received FIN */
	FW_FLOWC_MNEM_TCPSTATE_LASTACK = 8, /* haven't gotten ACK for FIN and
					     * will resend FIN but have
					     * received FIN */
	FW_FLOWC_MNEM_TCPSTATE_FINWAIT2 = 9, /* sent FIN and got FIN + ACK,
					      * waiting for FIN */
	FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */
};

/* ETHOFLD flow state reported via the FLOWC EOSTATE mnemonic. */
enum fw_flowc_mnem_eostate {
	FW_FLOWC_MNEM_EOSTATE_CLOSED = 0, /* illegal */
	FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */
	FW_FLOWC_MNEM_EOSTATE_CLOSING = 2, /* graceful close, after sending
					    * outstanding payload */
	FW_FLOWC_MNEM_EOSTATE_ABORTING = 3, /* immediate close, after
					     * discarding outstanding payload */
};

/* Mnemonic codes for fw_flowc_mnemval.mnemonic below. */
enum fw_flowc_mnem {
	FW_FLOWC_MNEM_PFNVFN = 0, /* PFN [15:8] VFN [7:0] */
	FW_FLOWC_MNEM_CH = 1,
	FW_FLOWC_MNEM_PORT = 2,
	FW_FLOWC_MNEM_IQID = 3,
	FW_FLOWC_MNEM_SNDNXT = 4,
	FW_FLOWC_MNEM_RCVNXT = 5,
	FW_FLOWC_MNEM_SNDBUF = 6,
	FW_FLOWC_MNEM_MSS = 7,
	FW_FLOWC_MNEM_TXDATAPLEN_MAX = 8,
	FW_FLOWC_MNEM_TCPSTATE = 9,
	FW_FLOWC_MNEM_EOSTATE = 10,
	FW_FLOWC_MNEM_SCHEDCLASS = 11,
	FW_FLOWC_MNEM_DCBPRIO = 12,
	FW_FLOWC_MNEM_SND_SCALE = 13,
	FW_FLOWC_MNEM_RCV_SCALE = 14,
	FW_FLOWC_MNEM_ULP_MODE = 15,
	FW_FLOWC_MNEM_MAX = 16,
};

/* One (mnemonic, value) parameter of a FLOWC work request. */
struct fw_flowc_mnemval {
	__u8 mnemonic;
	__u8 r4[3];
	__be32 val;
};

/* FLOWC work request: header followed by nparams mnemval entries. */
struct fw_flowc_wr {
	__be32 op_to_nparams;
	__be32 flowid_len16;
#ifndef C99_NOT_SUPPORTED
	struct fw_flowc_mnemval mnemval[0];
#endif
};

#define S_FW_FLOWC_WR_NPARAMS 0
#define M_FW_FLOWC_WR_NPARAMS 0xff
#define V_FW_FLOWC_WR_NPARAMS(x) ((x) << S_FW_FLOWC_WR_NPARAMS)
#define G_FW_FLOWC_WR_NPARAMS(x) \
	(((x) >> S_FW_FLOWC_WR_NPARAMS) & M_FW_FLOWC_WR_NPARAMS)

/* Offloaded-connection Tx data work request. */
struct fw_ofld_tx_data_wr {
	__be32 op_to_immdlen;
	__be32 flowid_len16;
	__be32 plen;
	__be32 lsodisable_to_flags;
};

#define S_FW_OFLD_TX_DATA_WR_LSODISABLE 31
#define M_FW_OFLD_TX_DATA_WR_LSODISABLE 0x1
#define V_FW_OFLD_TX_DATA_WR_LSODISABLE(x) \
	((x) << S_FW_OFLD_TX_DATA_WR_LSODISABLE)
#define G_FW_OFLD_TX_DATA_WR_LSODISABLE(x) \
	(((x) >> S_FW_OFLD_TX_DATA_WR_LSODISABLE) & \
	 M_FW_OFLD_TX_DATA_WR_LSODISABLE)
#define F_FW_OFLD_TX_DATA_WR_LSODISABLE \
	V_FW_OFLD_TX_DATA_WR_LSODISABLE(1U)

#define S_FW_OFLD_TX_DATA_WR_ALIGNPLD 30
#define M_FW_OFLD_TX_DATA_WR_ALIGNPLD 0x1
#define V_FW_OFLD_TX_DATA_WR_ALIGNPLD(x) \
	((x) << S_FW_OFLD_TX_DATA_WR_ALIGNPLD)
#define G_FW_OFLD_TX_DATA_WR_ALIGNPLD(x) \
	(((x) >> S_FW_OFLD_TX_DATA_WR_ALIGNPLD) & M_FW_OFLD_TX_DATA_WR_ALIGNPLD)
#define F_FW_OFLD_TX_DATA_WR_ALIGNPLD V_FW_OFLD_TX_DATA_WR_ALIGNPLD(1U)

#define S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE 29
#define M_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE 0x1
#define V_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(x) \
	((x) << S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)
#define G_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(x) \
	(((x) >> S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE) & \
	 M_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)
#define F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE \
	V_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(1U)

#define S_FW_OFLD_TX_DATA_WR_FLAGS 0
#define M_FW_OFLD_TX_DATA_WR_FLAGS 0xfffffff
#define V_FW_OFLD_TX_DATA_WR_FLAGS(x) ((x) << S_FW_OFLD_TX_DATA_WR_FLAGS)
#define G_FW_OFLD_TX_DATA_WR_FLAGS(x) \
	(((x) >> S_FW_OFLD_TX_DATA_WR_FLAGS) & M_FW_OFLD_TX_DATA_WR_FLAGS)

/* Use fw_ofld_tx_data_wr structure */
#define S_FW_ISCSI_TX_DATA_WR_FLAGS_HI 10
#define M_FW_ISCSI_TX_DATA_WR_FLAGS_HI 0x3fffff
#define V_FW_ISCSI_TX_DATA_WR_FLAGS_HI(x) \
	((x) << S_FW_ISCSI_TX_DATA_WR_FLAGS_HI)
#define G_FW_ISCSI_TX_DATA_WR_FLAGS_HI(x) \
	(((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_HI) & M_FW_ISCSI_TX_DATA_WR_FLAGS_HI)

#define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO 9
#define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO 0x1
#define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(x) \
	((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO)
#define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(x) \
	(((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO) & \
	 M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO)
#define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO \
	V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(1U)

#define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI 8
#define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI 0x1
#define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(x) \
	((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI)
#define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(x) \
	(((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI) & \
	 M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI)
#define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI \
	V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(1U)

/* iSCSI ULP sub-mode: data-digest (DCRC) and header-digest (HCRC) bits. */
#define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC 7
#define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC 0x1
#define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
	((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC)
#define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
	(((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC) & \
	 M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC)
#define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC \
	V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(1U)

#define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC 6
#define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC 0x1
#define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
	((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC)
#define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
	(((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC) & \
	 M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC)
#define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC \
	V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(1U)

#define S_FW_ISCSI_TX_DATA_WR_FLAGS_LO 0
#define M_FW_ISCSI_TX_DATA_WR_FLAGS_LO 0x3f
#define V_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x) \
	((x) << S_FW_ISCSI_TX_DATA_WR_FLAGS_LO)
#define G_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x) \
	(((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_LO) & M_FW_ISCSI_TX_DATA_WR_FLAGS_LO)

/* Generic command work request. */
struct fw_cmd_wr {
	__be32 op_dma;
	__be32 len16_pkd;
	__be64 cookie_daddr;
};

#define S_FW_CMD_WR_DMA 17
#define M_FW_CMD_WR_DMA 0x1
#define V_FW_CMD_WR_DMA(x) ((x) << S_FW_CMD_WR_DMA)
#define G_FW_CMD_WR_DMA(x) (((x) >> S_FW_CMD_WR_DMA) & M_FW_CMD_WR_DMA)
#define F_FW_CMD_WR_DMA V_FW_CMD_WR_DMA(1U)

/* Ethernet Tx packet WR used by VF drivers; firmware fills in the
 * Ethernet header fields embedded in the request. */
struct fw_eth_tx_pkt_vm_wr {
	__be32 op_immdlen;
	__be32 equiq_to_len16;
	__be32 r3[2];
	__u8 ethmacdst[6];
	__u8 ethmacsrc[6];
	__be16 ethtype;
	__be16 vlantci;
};

/* Coalesced variant of fw_eth_tx_pkt_vm_wr. */
struct fw_eth_tx_pkts_vm_wr {
	__be32 op_pkd;
	__be32 equiq_to_len16;
	__be32 r3;
	__be16 plen;
	__u8 npkt;
	__u8 r4;
	__u8 ethmacdst[6];
	__u8 ethmacsrc[6];
	__be16 ethtype;
	__be16 vlantci;
};

/******************************************************************************
 *  R I   W O R K  R E Q U E S T s
 **************************************/

enum fw_ri_wr_opcode {
	FW_RI_RDMA_WRITE		= 0x0,	/* IETF RDMAP v1.0 ... */
	FW_RI_READ_REQ			= 0x1,
	FW_RI_READ_RESP			= 0x2,
	FW_RI_SEND			= 0x3,
	FW_RI_SEND_WITH_INV		= 0x4,
	FW_RI_SEND_WITH_SE		= 0x5,
	FW_RI_SEND_WITH_SE_INV		= 0x6,
	FW_RI_TERMINATE			= 0x7,
	FW_RI_RDMA_INIT			= 0x8,	/* CHELSIO RI specific ... */
	FW_RI_BIND_MW			= 0x9,
	FW_RI_FAST_REGISTER		= 0xa,
	FW_RI_LOCAL_INV			= 0xb,
	FW_RI_QP_MODIFY			= 0xc,
	FW_RI_BYPASS			= 0xd,
	FW_RI_RECEIVE			= 0xe,
#if 0
	FW_RI_SEND_IMMEDIATE		= 0x8,
	FW_RI_SEND_IMMEDIATE_WITH_SE	= 0x9,
	FW_RI_ATOMIC_REQUEST		= 0xa,
	FW_RI_ATOMIC_RESPONSE		= 0xb,
	FW_RI_BIND_MW			= 0xc,	/* CHELSIO RI specific ... */
	FW_RI_FAST_REGISTER		= 0xd,
	FW_RI_LOCAL_INV			= 0xe,
#endif
	FW_RI_SGE_EC_CR_RETURN		= 0xf,
	/* WRITE_IMMEDIATE reuses the RDMA_INIT opcode value (0x8). */
	FW_RI_WRITE_IMMEDIATE		= FW_RI_RDMA_INIT,
};

enum fw_ri_wr_flags {
	FW_RI_COMPLETION_FLAG		= 0x01,
	FW_RI_NOTIFICATION_FLAG		= 0x02,
	FW_RI_SOLICITED_EVENT_FLAG	= 0x04,
	FW_RI_READ_FENCE_FLAG		= 0x08,
	FW_RI_LOCAL_FENCE_FLAG		= 0x10,
	FW_RI_RDMA_READ_INVALIDATE	= 0x20,
	FW_RI_RDMA_WRITE_WITH_IMMEDIATE	= 0x40
};

enum fw_ri_mpa_attrs {
	FW_RI_MPA_RX_MARKER_ENABLE	= 0x01,
	FW_RI_MPA_TX_MARKER_ENABLE	= 0x02,
	FW_RI_MPA_CRC_ENABLE		= 0x04,
	FW_RI_MPA_IETF_ENABLE		= 0x08
};

enum fw_ri_qp_caps {
	FW_RI_QP_RDMA_READ_ENABLE	= 0x01,
	FW_RI_QP_RDMA_WRITE_ENABLE	= 0x02,
	FW_RI_QP_BIND_ENABLE		= 0x04,
	FW_RI_QP_FAST_REGISTER_ENABLE	= 0x08,
	FW_RI_QP_STAG0_ENABLE		= 0x10,
	FW_RI_QP_RDMA_READ_REQ_0B_ENABLE= 0x80,
};

enum fw_ri_addr_type {
	FW_RI_ZERO_BASED_TO		= 0x00,
	FW_RI_VA_BASED_TO		= 0x01
};

/* Memory-region access rights; REM/LOCAL are the OR of their R/W bits. */
enum fw_ri_mem_perms {
	FW_RI_MEM_ACCESS_REM_WRITE	= 0x01,
	FW_RI_MEM_ACCESS_REM_READ	= 0x02,
	FW_RI_MEM_ACCESS_REM		= 0x03,
	FW_RI_MEM_ACCESS_LOCAL_WRITE	= 0x04,
	FW_RI_MEM_ACCESS_LOCAL_READ	= 0x08,
	FW_RI_MEM_ACCESS_LOCAL		= 0x0C
};

enum fw_ri_stag_type {
	FW_RI_STAG_NSMR			= 0x00,
	FW_RI_STAG_SMR			= 0x01,
	FW_RI_STAG_MW			= 0x02,
	FW_RI_STAG_MW_RELAXED		= 0x03
};

enum fw_ri_data_op {
FW_RI_DATA_IMMD = 0x81, FW_RI_DATA_DSGL = 0x82, FW_RI_DATA_ISGL = 0x83 }; enum fw_ri_sgl_depth { FW_RI_SGL_DEPTH_MAX_SQ = 16, FW_RI_SGL_DEPTH_MAX_RQ = 4 }; enum fw_ri_cqe_err { FW_RI_CQE_ERR_SUCCESS = 0x00, /* success, no error detected */ FW_RI_CQE_ERR_STAG = 0x01, /* STAG invalid */ FW_RI_CQE_ERR_PDID = 0x02, /* PDID mismatch */ FW_RI_CQE_ERR_QPID = 0x03, /* QPID mismatch */ FW_RI_CQE_ERR_ACCESS = 0x04, /* Invalid access right */ FW_RI_CQE_ERR_WRAP = 0x05, /* Wrap error */ FW_RI_CQE_ERR_BOUND = 0x06, /* base and bounds violation */ FW_RI_CQE_ERR_INVALIDATE_SHARED_MR = 0x07, /* attempt to invalidate a SMR */ FW_RI_CQE_ERR_INVALIDATE_MR_WITH_MW_BOUND = 0x08, /* attempt to invalidate a MR w MW */ FW_RI_CQE_ERR_ECC = 0x09, /* ECC error detected */ FW_RI_CQE_ERR_ECC_PSTAG = 0x0A, /* ECC error detected when reading the PSTAG for a MW Invalidate */ FW_RI_CQE_ERR_PBL_ADDR_BOUND = 0x0B, /* pbl address out of bound : software error */ FW_RI_CQE_ERR_CRC = 0x10, /* CRC error */ FW_RI_CQE_ERR_MARKER = 0x11, /* Marker error */ FW_RI_CQE_ERR_PDU_LEN_ERR = 0x12, /* invalid PDU length */ FW_RI_CQE_ERR_OUT_OF_RQE = 0x13, /* out of RQE */ FW_RI_CQE_ERR_DDP_VERSION = 0x14, /* wrong DDP version */ FW_RI_CQE_ERR_RDMA_VERSION = 0x15, /* wrong RDMA version */ FW_RI_CQE_ERR_OPCODE = 0x16, /* invalid rdma opcode */ FW_RI_CQE_ERR_DDP_QUEUE_NUM = 0x17, /* invalid ddp queue number */ FW_RI_CQE_ERR_MSN = 0x18, /* MSN error */ FW_RI_CQE_ERR_TBIT = 0x19, /* tag bit not set correctly */ FW_RI_CQE_ERR_MO = 0x1A, /* MO not zero for TERMINATE or READ_REQ */ FW_RI_CQE_ERR_MSN_GAP = 0x1B, /* */ FW_RI_CQE_ERR_MSN_RANGE = 0x1C, /* */ FW_RI_CQE_ERR_IRD_OVERFLOW = 0x1D, /* */ FW_RI_CQE_ERR_RQE_ADDR_BOUND = 0x1E, /* RQE address out of bound : software error */ FW_RI_CQE_ERR_INTERNAL_ERR = 0x1F /* internel error (opcode mismatch) */ }; struct fw_ri_dsge_pair { __be32 len[2]; __be64 addr[2]; }; struct fw_ri_dsgl { __u8 op; __u8 r1; __be16 nsge; __be32 len0; __be64 addr0; #ifndef C99_NOT_SUPPORTED struct 
fw_ri_dsge_pair sge[0]; #endif }; struct fw_ri_sge { __be32 stag; __be32 len; __be64 to; }; struct fw_ri_isgl { __u8 op; __u8 r1; __be16 nsge; __be32 r2; #ifndef C99_NOT_SUPPORTED struct fw_ri_sge sge[0]; #endif }; struct fw_ri_immd { __u8 op; __u8 r1; __be16 r2; __be32 immdlen; #ifndef C99_NOT_SUPPORTED __u8 data[0]; #endif }; struct fw_ri_tpte { __be32 valid_to_pdid; __be32 locread_to_qpid; __be32 nosnoop_pbladdr; __be32 len_lo; __be32 va_hi; __be32 va_lo_fbo; __be32 dca_mwbcnt_pstag; __be32 len_hi; }; #define S_FW_RI_TPTE_VALID 31 #define M_FW_RI_TPTE_VALID 0x1 #define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID) #define G_FW_RI_TPTE_VALID(x) \ (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID) #define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U) #define S_FW_RI_TPTE_STAGKEY 23 #define M_FW_RI_TPTE_STAGKEY 0xff #define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY) #define G_FW_RI_TPTE_STAGKEY(x) \ (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY) #define S_FW_RI_TPTE_STAGSTATE 22 #define M_FW_RI_TPTE_STAGSTATE 0x1 #define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE) #define G_FW_RI_TPTE_STAGSTATE(x) \ (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE) #define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U) #define S_FW_RI_TPTE_STAGTYPE 20 #define M_FW_RI_TPTE_STAGTYPE 0x3 #define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE) #define G_FW_RI_TPTE_STAGTYPE(x) \ (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE) #define S_FW_RI_TPTE_PDID 0 #define M_FW_RI_TPTE_PDID 0xfffff #define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID) #define G_FW_RI_TPTE_PDID(x) \ (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID) #define S_FW_RI_TPTE_PERM 28 #define M_FW_RI_TPTE_PERM 0xf #define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM) #define G_FW_RI_TPTE_PERM(x) \ (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM) #define S_FW_RI_TPTE_REMINVDIS 27 #define M_FW_RI_TPTE_REMINVDIS 0x1 #define V_FW_RI_TPTE_REMINVDIS(x) ((x) << 
S_FW_RI_TPTE_REMINVDIS) #define G_FW_RI_TPTE_REMINVDIS(x) \ (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS) #define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U) #define S_FW_RI_TPTE_ADDRTYPE 26 #define M_FW_RI_TPTE_ADDRTYPE 1 #define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE) #define G_FW_RI_TPTE_ADDRTYPE(x) \ (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE) #define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U) #define S_FW_RI_TPTE_MWBINDEN 25 #define M_FW_RI_TPTE_MWBINDEN 0x1 #define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN) #define G_FW_RI_TPTE_MWBINDEN(x) \ (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN) #define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U) #define S_FW_RI_TPTE_PS 20 #define M_FW_RI_TPTE_PS 0x1f #define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS) #define G_FW_RI_TPTE_PS(x) \ (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS) #define S_FW_RI_TPTE_QPID 0 #define M_FW_RI_TPTE_QPID 0xfffff #define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID) #define G_FW_RI_TPTE_QPID(x) \ (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID) #define S_FW_RI_TPTE_NOSNOOP 31 #define M_FW_RI_TPTE_NOSNOOP 0x1 #define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP) #define G_FW_RI_TPTE_NOSNOOP(x) \ (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP) #define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U) #define S_FW_RI_TPTE_PBLADDR 0 #define M_FW_RI_TPTE_PBLADDR 0x1fffffff #define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR) #define G_FW_RI_TPTE_PBLADDR(x) \ (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR) #define S_FW_RI_TPTE_DCA 24 #define M_FW_RI_TPTE_DCA 0x1f #define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA) #define G_FW_RI_TPTE_DCA(x) \ (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA) #define S_FW_RI_TPTE_MWBCNT_PSTAG 0 #define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff #define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \ ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG) #define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \ (((x) >> 
S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG) enum fw_ri_cqe_rxtx { FW_RI_CQE_RXTX_RX = 0x0, FW_RI_CQE_RXTX_TX = 0x1, }; struct fw_ri_cqe { union fw_ri_rxtx { struct fw_ri_scqe { __be32 qpid_n_stat_rxtx_type; __be32 plen; __be32 stag; __be32 wrid; } scqe; struct fw_ri_rcqe { __be32 qpid_n_stat_rxtx_type; __be32 plen; __be32 stag; __be32 msn; } rcqe; struct fw_ri_rcqe_imm { __be32 qpid_n_stat_rxtx_type; __be32 plen; __be32 mo; __be32 msn; __u64 imm_data; } imm_data_rcqe; } u; }; #define S_FW_RI_CQE_QPID 12 #define M_FW_RI_CQE_QPID 0xfffff #define V_FW_RI_CQE_QPID(x) ((x) << S_FW_RI_CQE_QPID) #define G_FW_RI_CQE_QPID(x) \ (((x) >> S_FW_RI_CQE_QPID) & M_FW_RI_CQE_QPID) #define S_FW_RI_CQE_NOTIFY 10 #define M_FW_RI_CQE_NOTIFY 0x1 #define V_FW_RI_CQE_NOTIFY(x) ((x) << S_FW_RI_CQE_NOTIFY) #define G_FW_RI_CQE_NOTIFY(x) \ (((x) >> S_FW_RI_CQE_NOTIFY) & M_FW_RI_CQE_NOTIFY) #define S_FW_RI_CQE_STATUS 5 #define M_FW_RI_CQE_STATUS 0x1f #define V_FW_RI_CQE_STATUS(x) ((x) << S_FW_RI_CQE_STATUS) #define G_FW_RI_CQE_STATUS(x) \ (((x) >> S_FW_RI_CQE_STATUS) & M_FW_RI_CQE_STATUS) #define S_FW_RI_CQE_RXTX 4 #define M_FW_RI_CQE_RXTX 0x1 #define V_FW_RI_CQE_RXTX(x) ((x) << S_FW_RI_CQE_RXTX) #define G_FW_RI_CQE_RXTX(x) \ (((x) >> S_FW_RI_CQE_RXTX) & M_FW_RI_CQE_RXTX) #define S_FW_RI_CQE_TYPE 0 #define M_FW_RI_CQE_TYPE 0xf #define V_FW_RI_CQE_TYPE(x) ((x) << S_FW_RI_CQE_TYPE) #define G_FW_RI_CQE_TYPE(x) \ (((x) >> S_FW_RI_CQE_TYPE) & M_FW_RI_CQE_TYPE) enum fw_ri_res_type { FW_RI_RES_TYPE_SQ, FW_RI_RES_TYPE_RQ, FW_RI_RES_TYPE_CQ, FW_RI_RES_TYPE_SRQ, }; enum fw_ri_res_op { FW_RI_RES_OP_WRITE, FW_RI_RES_OP_RESET, }; struct fw_ri_res { union fw_ri_restype { struct fw_ri_res_sqrq { __u8 restype; __u8 op; __be16 r3; __be32 eqid; __be32 r4[2]; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; } sqrq; struct fw_ri_res_cq { __u8 restype; __u8 op; __be16 r3; __be32 iqid; __be32 r4[2]; __be32 iqandst_to_iqandstindex; __be16 iqdroprss_to_iqesize; __be16 iqsize; __be64 iqaddr; 
__be32 iqns_iqro; __be32 r6_lo; __be64 r7; } cq; struct fw_ri_res_srq { __u8 restype; __u8 op; __be16 r3; __be32 eqid; __be32 r4[2]; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; __be32 srqid; __be32 pdid; __be32 hwsrqsize; __be32 hwsrqaddr; } srq; } u; }; struct fw_ri_res_wr { __be32 op_nres; __be32 len16_pkd; __u64 cookie; #ifndef C99_NOT_SUPPORTED struct fw_ri_res res[0]; #endif }; #define S_FW_RI_RES_WR_VFN 8 #define M_FW_RI_RES_WR_VFN 0xff #define V_FW_RI_RES_WR_VFN(x) ((x) << S_FW_RI_RES_WR_VFN) #define G_FW_RI_RES_WR_VFN(x) \ (((x) >> S_FW_RI_RES_WR_VFN) & M_FW_RI_RES_WR_VFN) #define S_FW_RI_RES_WR_NRES 0 #define M_FW_RI_RES_WR_NRES 0xff #define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES) #define G_FW_RI_RES_WR_NRES(x) \ (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES) #define S_FW_RI_RES_WR_FETCHSZM 26 #define M_FW_RI_RES_WR_FETCHSZM 0x1 #define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM) #define G_FW_RI_RES_WR_FETCHSZM(x) \ (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM) #define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U) #define S_FW_RI_RES_WR_STATUSPGNS 25 #define M_FW_RI_RES_WR_STATUSPGNS 0x1 #define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS) #define G_FW_RI_RES_WR_STATUSPGNS(x) \ (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS) #define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U) #define S_FW_RI_RES_WR_STATUSPGRO 24 #define M_FW_RI_RES_WR_STATUSPGRO 0x1 #define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO) #define G_FW_RI_RES_WR_STATUSPGRO(x) \ (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO) #define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U) #define S_FW_RI_RES_WR_FETCHNS 23 #define M_FW_RI_RES_WR_FETCHNS 0x1 #define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS) #define G_FW_RI_RES_WR_FETCHNS(x) \ (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS) #define 
F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U) #define S_FW_RI_RES_WR_FETCHRO 22 #define M_FW_RI_RES_WR_FETCHRO 0x1 #define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO) #define G_FW_RI_RES_WR_FETCHRO(x) \ (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO) #define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U) #define S_FW_RI_RES_WR_HOSTFCMODE 20 #define M_FW_RI_RES_WR_HOSTFCMODE 0x3 #define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE) #define G_FW_RI_RES_WR_HOSTFCMODE(x) \ (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE) #define S_FW_RI_RES_WR_CPRIO 19 #define M_FW_RI_RES_WR_CPRIO 0x1 #define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO) #define G_FW_RI_RES_WR_CPRIO(x) \ (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO) #define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U) #define S_FW_RI_RES_WR_ONCHIP 18 #define M_FW_RI_RES_WR_ONCHIP 0x1 #define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP) #define G_FW_RI_RES_WR_ONCHIP(x) \ (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP) #define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U) #define S_FW_RI_RES_WR_PCIECHN 16 #define M_FW_RI_RES_WR_PCIECHN 0x3 #define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN) #define G_FW_RI_RES_WR_PCIECHN(x) \ (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN) #define S_FW_RI_RES_WR_IQID 0 #define M_FW_RI_RES_WR_IQID 0xffff #define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID) #define G_FW_RI_RES_WR_IQID(x) \ (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID) #define S_FW_RI_RES_WR_DCAEN 31 #define M_FW_RI_RES_WR_DCAEN 0x1 #define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN) #define G_FW_RI_RES_WR_DCAEN(x) \ (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN) #define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U) #define S_FW_RI_RES_WR_DCACPU 26 #define M_FW_RI_RES_WR_DCACPU 0x1f #define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU) #define 
G_FW_RI_RES_WR_DCACPU(x) \ (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU) #define S_FW_RI_RES_WR_FBMIN 23 #define M_FW_RI_RES_WR_FBMIN 0x7 #define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN) #define G_FW_RI_RES_WR_FBMIN(x) \ (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN) #define S_FW_RI_RES_WR_FBMAX 20 #define M_FW_RI_RES_WR_FBMAX 0x7 #define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX) #define G_FW_RI_RES_WR_FBMAX(x) \ (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX) #define S_FW_RI_RES_WR_CIDXFTHRESHO 19 #define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1 #define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO) #define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO) #define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U) #define S_FW_RI_RES_WR_CIDXFTHRESH 16 #define M_FW_RI_RES_WR_CIDXFTHRESH 0x7 #define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH) #define G_FW_RI_RES_WR_CIDXFTHRESH(x) \ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH) #define S_FW_RI_RES_WR_EQSIZE 0 #define M_FW_RI_RES_WR_EQSIZE 0xffff #define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE) #define G_FW_RI_RES_WR_EQSIZE(x) \ (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE) #define S_FW_RI_RES_WR_IQANDST 15 #define M_FW_RI_RES_WR_IQANDST 0x1 #define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST) #define G_FW_RI_RES_WR_IQANDST(x) \ (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST) #define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U) #define S_FW_RI_RES_WR_IQANUS 14 #define M_FW_RI_RES_WR_IQANUS 0x1 #define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS) #define G_FW_RI_RES_WR_IQANUS(x) \ (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS) #define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U) #define S_FW_RI_RES_WR_IQANUD 12 #define M_FW_RI_RES_WR_IQANUD 0x3 #define V_FW_RI_RES_WR_IQANUD(x) 
((x) << S_FW_RI_RES_WR_IQANUD) #define G_FW_RI_RES_WR_IQANUD(x) \ (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD) #define S_FW_RI_RES_WR_IQANDSTINDEX 0 #define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff #define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX) #define G_FW_RI_RES_WR_IQANDSTINDEX(x) \ (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX) #define S_FW_RI_RES_WR_IQDROPRSS 15 #define M_FW_RI_RES_WR_IQDROPRSS 0x1 #define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS) #define G_FW_RI_RES_WR_IQDROPRSS(x) \ (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS) #define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U) #define S_FW_RI_RES_WR_IQGTSMODE 14 #define M_FW_RI_RES_WR_IQGTSMODE 0x1 #define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE) #define G_FW_RI_RES_WR_IQGTSMODE(x) \ (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE) #define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U) #define S_FW_RI_RES_WR_IQPCIECH 12 #define M_FW_RI_RES_WR_IQPCIECH 0x3 #define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH) #define G_FW_RI_RES_WR_IQPCIECH(x) \ (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH) #define S_FW_RI_RES_WR_IQDCAEN 11 #define M_FW_RI_RES_WR_IQDCAEN 0x1 #define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN) #define G_FW_RI_RES_WR_IQDCAEN(x) \ (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN) #define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U) #define S_FW_RI_RES_WR_IQDCACPU 6 #define M_FW_RI_RES_WR_IQDCACPU 0x1f #define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU) #define G_FW_RI_RES_WR_IQDCACPU(x) \ (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU) #define S_FW_RI_RES_WR_IQINTCNTTHRESH 4 #define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3 #define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \ ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH) #define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \ (((x) >> 
S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH) #define S_FW_RI_RES_WR_IQO 3 #define M_FW_RI_RES_WR_IQO 0x1 #define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO) #define G_FW_RI_RES_WR_IQO(x) \ (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO) #define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U) #define S_FW_RI_RES_WR_IQCPRIO 2 #define M_FW_RI_RES_WR_IQCPRIO 0x1 #define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO) #define G_FW_RI_RES_WR_IQCPRIO(x) \ (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO) #define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U) #define S_FW_RI_RES_WR_IQESIZE 0 #define M_FW_RI_RES_WR_IQESIZE 0x3 #define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE) #define G_FW_RI_RES_WR_IQESIZE(x) \ (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE) #define S_FW_RI_RES_WR_IQNS 31 #define M_FW_RI_RES_WR_IQNS 0x1 #define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS) #define G_FW_RI_RES_WR_IQNS(x) \ (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS) #define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U) #define S_FW_RI_RES_WR_IQRO 30 #define M_FW_RI_RES_WR_IQRO 0x1 #define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO) #define G_FW_RI_RES_WR_IQRO(x) \ (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO) #define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U) struct fw_ri_rdma_write_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __u64 immd_data; __be32 plen; __be32 stag_sink; __be64 to_sink; #ifndef C99_NOT_SUPPORTED union { struct fw_ri_immd immd_src[0]; struct fw_ri_isgl isgl_src[0]; } u; #endif }; struct fw_ri_send_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __be32 sendop_pkd; __be32 stag_inv; __be32 plen; __be32 r3; __be64 r4; #ifndef C99_NOT_SUPPORTED union { struct fw_ri_immd immd_src[0]; struct fw_ri_isgl isgl_src[0]; } u; #endif }; #define S_FW_RI_SEND_WR_SENDOP 0 #define M_FW_RI_SEND_WR_SENDOP 0xf #define V_FW_RI_SEND_WR_SENDOP(x) ((x) << 
S_FW_RI_SEND_WR_SENDOP) #define G_FW_RI_SEND_WR_SENDOP(x) \ (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP) struct fw_ri_rdma_write_cmpl_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __u8 r2; __u8 flags_send; __u16 wrid_send; __be32 stag_inv; __be32 plen; __be32 stag_sink; __be64 to_sink; union fw_ri_cmpl { struct fw_ri_immd_cmpl { __u8 op; __u8 r1[6]; __u8 immdlen; __u8 data[16]; } immd_src; struct fw_ri_isgl isgl_src; } u_cmpl; __be64 r3; #ifndef C99_NOT_SUPPORTED union fw_ri_write { struct fw_ri_immd immd_src[0]; struct fw_ri_isgl isgl_src[0]; } u; #endif }; struct fw_ri_rdma_read_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __be64 r2; __be32 stag_sink; __be32 to_sink_hi; __be32 to_sink_lo; __be32 plen; __be32 stag_src; __be32 to_src_hi; __be32 to_src_lo; __be32 r5; }; struct fw_ri_recv_wr { __u8 opcode; __u8 r1; __u16 wrid; __u8 r2[3]; __u8 len16; struct fw_ri_isgl isgl; }; struct fw_ri_bind_mw_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __u8 qpbinde_to_dcacpu; __u8 pgsz_shift; __u8 addr_type; __u8 mem_perms; __be32 stag_mr; __be32 stag_mw; __be32 r3; __be64 len_mw; __be64 va_fbo; __be64 r4; }; #define S_FW_RI_BIND_MW_WR_QPBINDE 6 #define M_FW_RI_BIND_MW_WR_QPBINDE 0x1 #define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE) #define G_FW_RI_BIND_MW_WR_QPBINDE(x) \ (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE) #define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U) #define S_FW_RI_BIND_MW_WR_NS 5 #define M_FW_RI_BIND_MW_WR_NS 0x1 #define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS) #define G_FW_RI_BIND_MW_WR_NS(x) \ (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS) #define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U) #define S_FW_RI_BIND_MW_WR_DCACPU 0 #define M_FW_RI_BIND_MW_WR_DCACPU 0x1f #define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU) #define G_FW_RI_BIND_MW_WR_DCACPU(x) \ (((x) >> 
S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU) struct fw_ri_fr_nsmr_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __u8 qpbinde_to_dcacpu; __u8 pgsz_shift; __u8 addr_type; __u8 mem_perms; __be32 stag; __be32 len_hi; __be32 len_lo; __be32 va_hi; __be32 va_lo_fbo; }; #define S_FW_RI_FR_NSMR_WR_QPBINDE 6 #define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1 #define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE) #define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \ (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE) #define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U) #define S_FW_RI_FR_NSMR_WR_NS 5 #define M_FW_RI_FR_NSMR_WR_NS 0x1 #define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS) #define G_FW_RI_FR_NSMR_WR_NS(x) \ (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS) #define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U) #define S_FW_RI_FR_NSMR_WR_DCACPU 0 #define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f #define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU) #define G_FW_RI_FR_NSMR_WR_DCACPU(x) \ (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU) struct fw_ri_fr_nsmr_tpte_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __be32 r2; __be32 stag; struct fw_ri_tpte tpte; __be64 pbl[2]; }; struct fw_ri_inv_lstag_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __be32 r2; __be32 stag_inv; }; struct fw_ri_send_immediate_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __be32 sendimmop_pkd; __be32 r3; __be32 plen; __be32 r4; __be64 r5; #ifndef C99_NOT_SUPPORTED struct fw_ri_immd immd_src[0]; #endif }; #define S_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP 0 #define M_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP 0xf #define V_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP(x) \ ((x) << S_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP) #define G_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP(x) \ (((x) >> S_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP) & \ M_FW_RI_SEND_IMMEDIATE_WR_SENDIMMOP) enum fw_ri_atomic_op { 
FW_RI_ATOMIC_OP_FETCHADD, FW_RI_ATOMIC_OP_SWAP, FW_RI_ATOMIC_OP_CMDSWAP, }; struct fw_ri_atomic_wr { __u8 opcode; __u8 flags; __u16 wrid; __u8 r1[3]; __u8 len16; __be32 atomicop_pkd; __be64 r3; __be32 aopcode_pkd; __be32 reqid; __be32 stag; __be32 to_hi; __be32 to_lo; __be32 addswap_data_hi; __be32 addswap_data_lo; __be32 addswap_mask_hi; __be32 addswap_mask_lo; __be32 compare_data_hi; __be32 compare_data_lo; __be32 compare_mask_hi; __be32 compare_mask_lo; __be32 r5; }; #define S_FW_RI_ATOMIC_WR_ATOMICOP 0 #define M_FW_RI_ATOMIC_WR_ATOMICOP 0xf #define V_FW_RI_ATOMIC_WR_ATOMICOP(x) ((x) << S_FW_RI_ATOMIC_WR_ATOMICOP) #define G_FW_RI_ATOMIC_WR_ATOMICOP(x) \ (((x) >> S_FW_RI_ATOMIC_WR_ATOMICOP) & M_FW_RI_ATOMIC_WR_ATOMICOP) #define S_FW_RI_ATOMIC_WR_AOPCODE 0 #define M_FW_RI_ATOMIC_WR_AOPCODE 0xf #define V_FW_RI_ATOMIC_WR_AOPCODE(x) ((x) << S_FW_RI_ATOMIC_WR_AOPCODE) #define G_FW_RI_ATOMIC_WR_AOPCODE(x) \ (((x) >> S_FW_RI_ATOMIC_WR_AOPCODE) & M_FW_RI_ATOMIC_WR_AOPCODE) enum fw_ri_type { FW_RI_TYPE_INIT, FW_RI_TYPE_FINI, FW_RI_TYPE_TERMINATE }; enum fw_ri_init_p2ptype { FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE, FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ, FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND, FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV, FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE, FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV, FW_RI_INIT_P2PTYPE_DISABLED = 0xf, }; enum fw_ri_init_rqeqid_srq { FW_RI_INIT_RQEQID_SRQ = 1 << 31, }; struct fw_ri_wr { __be32 op_compl; __be32 flowid_len16; __u64 cookie; union fw_ri { struct fw_ri_init { __u8 type; __u8 mpareqbit_p2ptype; __u8 r4[2]; __u8 mpa_attrs; __u8 qp_caps; __be16 nrqe; __be32 pdid; __be32 qpid; __be32 sq_eqid; __be32 rq_eqid; __be32 scqid; __be32 rcqid; __be32 ord_max; __be32 ird_max; __be32 iss; __be32 irs; __be32 hwrqsize; __be32 hwrqaddr; __be64 r5; union fw_ri_init_p2p { struct fw_ri_rdma_write_wr write; struct fw_ri_rdma_read_wr read; struct fw_ri_send_wr send; } u; } 
init; struct fw_ri_fini { __u8 type; __u8 r3[7]; __be64 r4; } fini; struct fw_ri_terminate { __u8 type; __u8 r3[3]; __be32 immdlen; __u8 termmsg[40]; } terminate; } u; }; #define S_FW_RI_WR_MPAREQBIT 7 #define M_FW_RI_WR_MPAREQBIT 0x1 #define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT) #define G_FW_RI_WR_MPAREQBIT(x) \ (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT) #define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U) #define S_FW_RI_WR_0BRRBIT 6 #define M_FW_RI_WR_0BRRBIT 0x1 #define V_FW_RI_WR_0BRRBIT(x) ((x) << S_FW_RI_WR_0BRRBIT) #define G_FW_RI_WR_0BRRBIT(x) \ (((x) >> S_FW_RI_WR_0BRRBIT) & M_FW_RI_WR_0BRRBIT) #define F_FW_RI_WR_0BRRBIT V_FW_RI_WR_0BRRBIT(1U) #define S_FW_RI_WR_P2PTYPE 0 #define M_FW_RI_WR_P2PTYPE 0xf #define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE) #define G_FW_RI_WR_P2PTYPE(x) \ (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE) /****************************************************************************** * F O i S C S I W O R K R E Q U E S T s *********************************************/ #define FW_FOISCSI_NAME_MAX_LEN 224 #define FW_FOISCSI_ALIAS_MAX_LEN 224 #define FW_FOISCSI_KEY_MAX_LEN 64 #define FW_FOISCSI_VAL_MAX_LEN 256 #define FW_FOISCSI_CHAP_SEC_MAX_LEN 128 #define FW_FOISCSI_INIT_NODE_MAX 8 enum fw_chnet_ifconf_wr_subop { FW_CHNET_IFCONF_WR_SUBOP_NONE = 0, FW_CHNET_IFCONF_WR_SUBOP_IPV4_SET, FW_CHNET_IFCONF_WR_SUBOP_IPV4_GET, FW_CHNET_IFCONF_WR_SUBOP_VLAN_IPV4_SET, FW_CHNET_IFCONF_WR_SUBOP_VLAN_IPV4_GET, FW_CHNET_IFCONF_WR_SUBOP_IPV6_SET, FW_CHNET_IFCONF_WR_SUBOP_IPV6_GET, FW_CHNET_IFCONF_WR_SUBOP_VLAN_SET, FW_CHNET_IFCONF_WR_SUBOP_VLAN_GET, FW_CHNET_IFCONF_WR_SUBOP_MTU_SET, FW_CHNET_IFCONF_WR_SUBOP_MTU_GET, FW_CHNET_IFCONF_WR_SUBOP_DHCP_SET, FW_CHNET_IFCONF_WR_SUBOP_DHCP_GET, FW_CHNET_IFCONF_WR_SUBOP_DHCPV6_SET, FW_CHNET_IFCONF_WR_SUBOP_DHCPV6_GET, FW_CHNET_IFCONF_WR_SUBOP_LINKLOCAL_ADDR_SET, FW_CHNET_IFCONF_WR_SUBOP_RA_BASED_ADDR_SET, FW_CHNET_IFCONF_WR_SUBOP_ADDR_EXPIRED, 
FW_CHNET_IFCONF_WR_SUBOP_ICMP_PING4, FW_CHNET_IFCONF_WR_SUBOP_ICMP_PING6, FW_CHNET_IFCONF_WR_SUBOP_ICMP_PLD_PING4, FW_CHNET_IFCONF_WR_SUBOP_ICMP_PLD_PING6, FW_CHNET_IFCONF_WR_SUBOP_PMTU6_CLEAR, FW_CHNET_IFCONF_WR_SUBOP_MAX, }; struct fw_chnet_ifconf_wr { __be32 op_compl; __be32 flowid_len16; __u64 cookie; __be32 if_flowid; __u8 idx; __u8 subop; __u8 retval; __u8 r2; union { __be64 r3; struct fw_chnet_ifconf_ping { __be16 ping_time; __u8 ping_rsptype; __u8 ping_param_rspcode_to_fin_bit; __u8 ping_pktsize; __u8 ping_ttl; __be16 ping_seq; } ping; struct fw_chnet_ifconf_mac { __u8 peer_mac[6]; __u8 smac_idx; } mac; } u; struct fw_chnet_ifconf_params { __be16 ping_pldsize; __be16 r0; __be16 vlanid; __be16 mtu; union fw_chnet_ifconf_addr_type { struct fw_chnet_ifconf_ipv4 { __be32 addr; __be32 mask; __be32 router; __be32 r0; __be64 r1; } ipv4; struct fw_chnet_ifconf_ipv6 { __u8 prefix_len; __u8 r0; __be16 r1; __be32 r2; __be64 addr_hi; __be64 addr_lo; __be64 router_hi; __be64 router_lo; } ipv6; } in_attr; } param; }; #define S_FW_CHNET_IFCONF_WR_PING_MACBIT 1 #define M_FW_CHNET_IFCONF_WR_PING_MACBIT 0x1 #define V_FW_CHNET_IFCONF_WR_PING_MACBIT(x) \ ((x) << S_FW_CHNET_IFCONF_WR_PING_MACBIT) #define G_FW_CHNET_IFCONF_WR_PING_MACBIT(x) \ (((x) >> S_FW_CHNET_IFCONF_WR_PING_MACBIT) & \ M_FW_CHNET_IFCONF_WR_PING_MACBIT) #define F_FW_CHNET_IFCONF_WR_PING_MACBIT \ V_FW_CHNET_IFCONF_WR_PING_MACBIT(1U) #define S_FW_CHNET_IFCONF_WR_FIN_BIT 0 #define M_FW_CHNET_IFCONF_WR_FIN_BIT 0x1 #define V_FW_CHNET_IFCONF_WR_FIN_BIT(x) ((x) << S_FW_CHNET_IFCONF_WR_FIN_BIT) #define G_FW_CHNET_IFCONF_WR_FIN_BIT(x) \ (((x) >> S_FW_CHNET_IFCONF_WR_FIN_BIT) & M_FW_CHNET_IFCONF_WR_FIN_BIT) #define F_FW_CHNET_IFCONF_WR_FIN_BIT V_FW_CHNET_IFCONF_WR_FIN_BIT(1U) enum fw_foiscsi_node_type { FW_FOISCSI_NODE_TYPE_INITIATOR = 0, FW_FOISCSI_NODE_TYPE_TARGET, }; enum fw_foiscsi_session_type { FW_FOISCSI_SESSION_TYPE_DISCOVERY = 0, FW_FOISCSI_SESSION_TYPE_NORMAL, }; enum fw_foiscsi_auth_policy { 
FW_FOISCSI_AUTH_POLICY_ONEWAY = 0, FW_FOISCSI_AUTH_POLICY_MUTUAL, }; enum fw_foiscsi_auth_method { FW_FOISCSI_AUTH_METHOD_NONE = 0, FW_FOISCSI_AUTH_METHOD_CHAP, FW_FOISCSI_AUTH_METHOD_CHAP_FST, FW_FOISCSI_AUTH_METHOD_CHAP_SEC, }; enum fw_foiscsi_digest_type { FW_FOISCSI_DIGEST_TYPE_NONE = 0, FW_FOISCSI_DIGEST_TYPE_CRC32, FW_FOISCSI_DIGEST_TYPE_CRC32_FST, FW_FOISCSI_DIGEST_TYPE_CRC32_SEC, }; enum fw_foiscsi_wr_subop { FW_FOISCSI_WR_SUBOP_ADD = 1, FW_FOISCSI_WR_SUBOP_DEL = 2, FW_FOISCSI_WR_SUBOP_MOD = 4, }; enum fw_coiscsi_stats_wr_subop { FW_COISCSI_WR_SUBOP_TOT = 1, FW_COISCSI_WR_SUBOP_MAX = 2, FW_COISCSI_WR_SUBOP_CUR = 3, FW_COISCSI_WR_SUBOP_CLR = 4, }; enum fw_foiscsi_ctrl_state { FW_FOISCSI_CTRL_STATE_FREE = 0, FW_FOISCSI_CTRL_STATE_ONLINE = 1, FW_FOISCSI_CTRL_STATE_FAILED, FW_FOISCSI_CTRL_STATE_IN_RECOVERY, FW_FOISCSI_CTRL_STATE_REDIRECT, }; struct fw_rdev_wr { __be32 op_to_immdlen; __be32 alloc_to_len16; __be64 cookie; __u8 protocol; __u8 event_cause; __u8 cur_state; __u8 prev_state; __be32 flags_to_assoc_flowid; union rdev_entry { struct fcoe_rdev_entry { __be32 flowid; __u8 protocol; __u8 event_cause; __u8 flags; __u8 rjt_reason; __u8 cur_login_st; __u8 prev_login_st; __be16 rcv_fr_sz; __u8 rd_xfer_rdy_to_rport_type; __u8 vft_to_qos; __u8 org_proc_assoc_to_acc_rsp_code; __u8 enh_disc_to_tgt; __u8 wwnn[8]; __u8 wwpn[8]; __be16 iqid; __u8 fc_oui[3]; __u8 r_id[3]; } fcoe_rdev; struct iscsi_rdev_entry { __be32 flowid; __u8 protocol; __u8 event_cause; __u8 flags; __u8 r3; __be16 iscsi_opts; __be16 tcp_opts; __be16 ip_opts; __be16 max_rcv_len; __be16 max_snd_len; __be16 first_brst_len; __be16 max_brst_len; __be16 r4; __be16 def_time2wait; __be16 def_time2ret; __be16 nop_out_intrvl; __be16 non_scsi_to; __be16 isid; __be16 tsid; __be16 port; __be16 tpgt; __u8 r5[6]; __be16 iqid; } iscsi_rdev; } u; }; #define S_FW_RDEV_WR_IMMDLEN 0 #define M_FW_RDEV_WR_IMMDLEN 0xff #define V_FW_RDEV_WR_IMMDLEN(x) ((x) << S_FW_RDEV_WR_IMMDLEN) #define G_FW_RDEV_WR_IMMDLEN(x) \ (((x) 
>> S_FW_RDEV_WR_IMMDLEN) & M_FW_RDEV_WR_IMMDLEN) #define S_FW_RDEV_WR_ALLOC 31 #define M_FW_RDEV_WR_ALLOC 0x1 #define V_FW_RDEV_WR_ALLOC(x) ((x) << S_FW_RDEV_WR_ALLOC) #define G_FW_RDEV_WR_ALLOC(x) \ (((x) >> S_FW_RDEV_WR_ALLOC) & M_FW_RDEV_WR_ALLOC) #define F_FW_RDEV_WR_ALLOC V_FW_RDEV_WR_ALLOC(1U) #define S_FW_RDEV_WR_FREE 30 #define M_FW_RDEV_WR_FREE 0x1 #define V_FW_RDEV_WR_FREE(x) ((x) << S_FW_RDEV_WR_FREE) #define G_FW_RDEV_WR_FREE(x) \ (((x) >> S_FW_RDEV_WR_FREE) & M_FW_RDEV_WR_FREE) #define F_FW_RDEV_WR_FREE V_FW_RDEV_WR_FREE(1U) #define S_FW_RDEV_WR_MODIFY 29 #define M_FW_RDEV_WR_MODIFY 0x1 #define V_FW_RDEV_WR_MODIFY(x) ((x) << S_FW_RDEV_WR_MODIFY) #define G_FW_RDEV_WR_MODIFY(x) \ (((x) >> S_FW_RDEV_WR_MODIFY) & M_FW_RDEV_WR_MODIFY) #define F_FW_RDEV_WR_MODIFY V_FW_RDEV_WR_MODIFY(1U) #define S_FW_RDEV_WR_FLOWID 8 #define M_FW_RDEV_WR_FLOWID 0xfffff #define V_FW_RDEV_WR_FLOWID(x) ((x) << S_FW_RDEV_WR_FLOWID) #define G_FW_RDEV_WR_FLOWID(x) \ (((x) >> S_FW_RDEV_WR_FLOWID) & M_FW_RDEV_WR_FLOWID) #define S_FW_RDEV_WR_LEN16 0 #define M_FW_RDEV_WR_LEN16 0xff #define V_FW_RDEV_WR_LEN16(x) ((x) << S_FW_RDEV_WR_LEN16) #define G_FW_RDEV_WR_LEN16(x) \ (((x) >> S_FW_RDEV_WR_LEN16) & M_FW_RDEV_WR_LEN16) #define S_FW_RDEV_WR_FLAGS 24 #define M_FW_RDEV_WR_FLAGS 0xff #define V_FW_RDEV_WR_FLAGS(x) ((x) << S_FW_RDEV_WR_FLAGS) #define G_FW_RDEV_WR_FLAGS(x) \ (((x) >> S_FW_RDEV_WR_FLAGS) & M_FW_RDEV_WR_FLAGS) #define S_FW_RDEV_WR_GET_NEXT 20 #define M_FW_RDEV_WR_GET_NEXT 0xf #define V_FW_RDEV_WR_GET_NEXT(x) ((x) << S_FW_RDEV_WR_GET_NEXT) #define G_FW_RDEV_WR_GET_NEXT(x) \ (((x) >> S_FW_RDEV_WR_GET_NEXT) & M_FW_RDEV_WR_GET_NEXT) #define S_FW_RDEV_WR_ASSOC_FLOWID 0 #define M_FW_RDEV_WR_ASSOC_FLOWID 0xfffff #define V_FW_RDEV_WR_ASSOC_FLOWID(x) ((x) << S_FW_RDEV_WR_ASSOC_FLOWID) #define G_FW_RDEV_WR_ASSOC_FLOWID(x) \ (((x) >> S_FW_RDEV_WR_ASSOC_FLOWID) & M_FW_RDEV_WR_ASSOC_FLOWID) #define S_FW_RDEV_WR_RJT 7 #define M_FW_RDEV_WR_RJT 0x1 #define V_FW_RDEV_WR_RJT(x) ((x) << 
S_FW_RDEV_WR_RJT) #define G_FW_RDEV_WR_RJT(x) (((x) >> S_FW_RDEV_WR_RJT) & M_FW_RDEV_WR_RJT) #define F_FW_RDEV_WR_RJT V_FW_RDEV_WR_RJT(1U) #define S_FW_RDEV_WR_REASON 0 #define M_FW_RDEV_WR_REASON 0x7f #define V_FW_RDEV_WR_REASON(x) ((x) << S_FW_RDEV_WR_REASON) #define G_FW_RDEV_WR_REASON(x) \ (((x) >> S_FW_RDEV_WR_REASON) & M_FW_RDEV_WR_REASON) #define S_FW_RDEV_WR_RD_XFER_RDY 7 #define M_FW_RDEV_WR_RD_XFER_RDY 0x1 #define V_FW_RDEV_WR_RD_XFER_RDY(x) ((x) << S_FW_RDEV_WR_RD_XFER_RDY) #define G_FW_RDEV_WR_RD_XFER_RDY(x) \ (((x) >> S_FW_RDEV_WR_RD_XFER_RDY) & M_FW_RDEV_WR_RD_XFER_RDY) #define F_FW_RDEV_WR_RD_XFER_RDY V_FW_RDEV_WR_RD_XFER_RDY(1U) #define S_FW_RDEV_WR_WR_XFER_RDY 6 #define M_FW_RDEV_WR_WR_XFER_RDY 0x1 #define V_FW_RDEV_WR_WR_XFER_RDY(x) ((x) << S_FW_RDEV_WR_WR_XFER_RDY) #define G_FW_RDEV_WR_WR_XFER_RDY(x) \ (((x) >> S_FW_RDEV_WR_WR_XFER_RDY) & M_FW_RDEV_WR_WR_XFER_RDY) #define F_FW_RDEV_WR_WR_XFER_RDY V_FW_RDEV_WR_WR_XFER_RDY(1U) #define S_FW_RDEV_WR_FC_SP 5 #define M_FW_RDEV_WR_FC_SP 0x1 #define V_FW_RDEV_WR_FC_SP(x) ((x) << S_FW_RDEV_WR_FC_SP) #define G_FW_RDEV_WR_FC_SP(x) \ (((x) >> S_FW_RDEV_WR_FC_SP) & M_FW_RDEV_WR_FC_SP) #define F_FW_RDEV_WR_FC_SP V_FW_RDEV_WR_FC_SP(1U) #define S_FW_RDEV_WR_RPORT_TYPE 0 #define M_FW_RDEV_WR_RPORT_TYPE 0x1f #define V_FW_RDEV_WR_RPORT_TYPE(x) ((x) << S_FW_RDEV_WR_RPORT_TYPE) #define G_FW_RDEV_WR_RPORT_TYPE(x) \ (((x) >> S_FW_RDEV_WR_RPORT_TYPE) & M_FW_RDEV_WR_RPORT_TYPE) #define S_FW_RDEV_WR_VFT 7 #define M_FW_RDEV_WR_VFT 0x1 #define V_FW_RDEV_WR_VFT(x) ((x) << S_FW_RDEV_WR_VFT) #define G_FW_RDEV_WR_VFT(x) (((x) >> S_FW_RDEV_WR_VFT) & M_FW_RDEV_WR_VFT) #define F_FW_RDEV_WR_VFT V_FW_RDEV_WR_VFT(1U) #define S_FW_RDEV_WR_NPIV 6 #define M_FW_RDEV_WR_NPIV 0x1 #define V_FW_RDEV_WR_NPIV(x) ((x) << S_FW_RDEV_WR_NPIV) #define G_FW_RDEV_WR_NPIV(x) \ (((x) >> S_FW_RDEV_WR_NPIV) & M_FW_RDEV_WR_NPIV) #define F_FW_RDEV_WR_NPIV V_FW_RDEV_WR_NPIV(1U) #define S_FW_RDEV_WR_CLASS 4 #define M_FW_RDEV_WR_CLASS 0x3 #define 
V_FW_RDEV_WR_CLASS(x) ((x) << S_FW_RDEV_WR_CLASS) #define G_FW_RDEV_WR_CLASS(x) \ (((x) >> S_FW_RDEV_WR_CLASS) & M_FW_RDEV_WR_CLASS) #define S_FW_RDEV_WR_SEQ_DEL 3 #define M_FW_RDEV_WR_SEQ_DEL 0x1 #define V_FW_RDEV_WR_SEQ_DEL(x) ((x) << S_FW_RDEV_WR_SEQ_DEL) #define G_FW_RDEV_WR_SEQ_DEL(x) \ (((x) >> S_FW_RDEV_WR_SEQ_DEL) & M_FW_RDEV_WR_SEQ_DEL) #define F_FW_RDEV_WR_SEQ_DEL V_FW_RDEV_WR_SEQ_DEL(1U) #define S_FW_RDEV_WR_PRIO_PREEMP 2 #define M_FW_RDEV_WR_PRIO_PREEMP 0x1 #define V_FW_RDEV_WR_PRIO_PREEMP(x) ((x) << S_FW_RDEV_WR_PRIO_PREEMP) #define G_FW_RDEV_WR_PRIO_PREEMP(x) \ (((x) >> S_FW_RDEV_WR_PRIO_PREEMP) & M_FW_RDEV_WR_PRIO_PREEMP) #define F_FW_RDEV_WR_PRIO_PREEMP V_FW_RDEV_WR_PRIO_PREEMP(1U) #define S_FW_RDEV_WR_PREF 1 #define M_FW_RDEV_WR_PREF 0x1 #define V_FW_RDEV_WR_PREF(x) ((x) << S_FW_RDEV_WR_PREF) #define G_FW_RDEV_WR_PREF(x) \ (((x) >> S_FW_RDEV_WR_PREF) & M_FW_RDEV_WR_PREF) #define F_FW_RDEV_WR_PREF V_FW_RDEV_WR_PREF(1U) #define S_FW_RDEV_WR_QOS 0 #define M_FW_RDEV_WR_QOS 0x1 #define V_FW_RDEV_WR_QOS(x) ((x) << S_FW_RDEV_WR_QOS) #define G_FW_RDEV_WR_QOS(x) (((x) >> S_FW_RDEV_WR_QOS) & M_FW_RDEV_WR_QOS) #define F_FW_RDEV_WR_QOS V_FW_RDEV_WR_QOS(1U) #define S_FW_RDEV_WR_ORG_PROC_ASSOC 7 #define M_FW_RDEV_WR_ORG_PROC_ASSOC 0x1 #define V_FW_RDEV_WR_ORG_PROC_ASSOC(x) ((x) << S_FW_RDEV_WR_ORG_PROC_ASSOC) #define G_FW_RDEV_WR_ORG_PROC_ASSOC(x) \ (((x) >> S_FW_RDEV_WR_ORG_PROC_ASSOC) & M_FW_RDEV_WR_ORG_PROC_ASSOC) #define F_FW_RDEV_WR_ORG_PROC_ASSOC V_FW_RDEV_WR_ORG_PROC_ASSOC(1U) #define S_FW_RDEV_WR_RSP_PROC_ASSOC 6 #define M_FW_RDEV_WR_RSP_PROC_ASSOC 0x1 #define V_FW_RDEV_WR_RSP_PROC_ASSOC(x) ((x) << S_FW_RDEV_WR_RSP_PROC_ASSOC) #define G_FW_RDEV_WR_RSP_PROC_ASSOC(x) \ (((x) >> S_FW_RDEV_WR_RSP_PROC_ASSOC) & M_FW_RDEV_WR_RSP_PROC_ASSOC) #define F_FW_RDEV_WR_RSP_PROC_ASSOC V_FW_RDEV_WR_RSP_PROC_ASSOC(1U) #define S_FW_RDEV_WR_IMAGE_PAIR 5 #define M_FW_RDEV_WR_IMAGE_PAIR 0x1 #define V_FW_RDEV_WR_IMAGE_PAIR(x) ((x) << S_FW_RDEV_WR_IMAGE_PAIR) #define 
G_FW_RDEV_WR_IMAGE_PAIR(x) \ (((x) >> S_FW_RDEV_WR_IMAGE_PAIR) & M_FW_RDEV_WR_IMAGE_PAIR) #define F_FW_RDEV_WR_IMAGE_PAIR V_FW_RDEV_WR_IMAGE_PAIR(1U) #define S_FW_RDEV_WR_ACC_RSP_CODE 0 #define M_FW_RDEV_WR_ACC_RSP_CODE 0x1f #define V_FW_RDEV_WR_ACC_RSP_CODE(x) ((x) << S_FW_RDEV_WR_ACC_RSP_CODE) #define G_FW_RDEV_WR_ACC_RSP_CODE(x) \ (((x) >> S_FW_RDEV_WR_ACC_RSP_CODE) & M_FW_RDEV_WR_ACC_RSP_CODE) #define S_FW_RDEV_WR_ENH_DISC 7 #define M_FW_RDEV_WR_ENH_DISC 0x1 #define V_FW_RDEV_WR_ENH_DISC(x) ((x) << S_FW_RDEV_WR_ENH_DISC) #define G_FW_RDEV_WR_ENH_DISC(x) \ (((x) >> S_FW_RDEV_WR_ENH_DISC) & M_FW_RDEV_WR_ENH_DISC) #define F_FW_RDEV_WR_ENH_DISC V_FW_RDEV_WR_ENH_DISC(1U) #define S_FW_RDEV_WR_REC 6 #define M_FW_RDEV_WR_REC 0x1 #define V_FW_RDEV_WR_REC(x) ((x) << S_FW_RDEV_WR_REC) #define G_FW_RDEV_WR_REC(x) (((x) >> S_FW_RDEV_WR_REC) & M_FW_RDEV_WR_REC) #define F_FW_RDEV_WR_REC V_FW_RDEV_WR_REC(1U) #define S_FW_RDEV_WR_TASK_RETRY_ID 5 #define M_FW_RDEV_WR_TASK_RETRY_ID 0x1 #define V_FW_RDEV_WR_TASK_RETRY_ID(x) ((x) << S_FW_RDEV_WR_TASK_RETRY_ID) #define G_FW_RDEV_WR_TASK_RETRY_ID(x) \ (((x) >> S_FW_RDEV_WR_TASK_RETRY_ID) & M_FW_RDEV_WR_TASK_RETRY_ID) #define F_FW_RDEV_WR_TASK_RETRY_ID V_FW_RDEV_WR_TASK_RETRY_ID(1U) #define S_FW_RDEV_WR_RETRY 4 #define M_FW_RDEV_WR_RETRY 0x1 #define V_FW_RDEV_WR_RETRY(x) ((x) << S_FW_RDEV_WR_RETRY) #define G_FW_RDEV_WR_RETRY(x) \ (((x) >> S_FW_RDEV_WR_RETRY) & M_FW_RDEV_WR_RETRY) #define F_FW_RDEV_WR_RETRY V_FW_RDEV_WR_RETRY(1U) #define S_FW_RDEV_WR_CONF_CMPL 3 #define M_FW_RDEV_WR_CONF_CMPL 0x1 #define V_FW_RDEV_WR_CONF_CMPL(x) ((x) << S_FW_RDEV_WR_CONF_CMPL) #define G_FW_RDEV_WR_CONF_CMPL(x) \ (((x) >> S_FW_RDEV_WR_CONF_CMPL) & M_FW_RDEV_WR_CONF_CMPL) #define F_FW_RDEV_WR_CONF_CMPL V_FW_RDEV_WR_CONF_CMPL(1U) #define S_FW_RDEV_WR_DATA_OVLY 2 #define M_FW_RDEV_WR_DATA_OVLY 0x1 #define V_FW_RDEV_WR_DATA_OVLY(x) ((x) << S_FW_RDEV_WR_DATA_OVLY) #define G_FW_RDEV_WR_DATA_OVLY(x) \ (((x) >> S_FW_RDEV_WR_DATA_OVLY) & M_FW_RDEV_WR_DATA_OVLY) 
#define F_FW_RDEV_WR_DATA_OVLY	V_FW_RDEV_WR_DATA_OVLY(1U)

/*
 * Remote-device role flags (bits 1:0): initiator and/or target.
 * Macro naming follows the file-wide convention: S_ = shift, M_ = mask
 * (post-shift), V_ = place value into field, G_ = extract value from a
 * register word, F_ = single-bit flag convenience constant.
 */
#define S_FW_RDEV_WR_INI	1
#define M_FW_RDEV_WR_INI	0x1
#define V_FW_RDEV_WR_INI(x)	((x) << S_FW_RDEV_WR_INI)
#define G_FW_RDEV_WR_INI(x)	(((x) >> S_FW_RDEV_WR_INI) & M_FW_RDEV_WR_INI)
#define F_FW_RDEV_WR_INI	V_FW_RDEV_WR_INI(1U)

#define S_FW_RDEV_WR_TGT	0
#define M_FW_RDEV_WR_TGT	0x1
#define V_FW_RDEV_WR_TGT(x)	((x) << S_FW_RDEV_WR_TGT)
#define G_FW_RDEV_WR_TGT(x)	(((x) >> S_FW_RDEV_WR_TGT) & M_FW_RDEV_WR_TGT)
#define F_FW_RDEV_WR_TGT	V_FW_RDEV_WR_TGT(1U)

/*
 * FOiSCSI node work request: creates/deletes/modifies an iSCSI node
 * (initiator identity) in the firmware.  Multi-byte fields are big-endian
 * on the wire (__be16/__be32); __u8/__u64 fields are passed through as-is.
 */
struct fw_foiscsi_node_wr {
	__be32 op_to_immdlen;
	__be32 no_sess_recv_to_len16;
	__u64  cookie;			/* opaque value echoed back to the host */
	__u8   subop;			/* presumably one of enum fw_foiscsi_wr_subop
					 * (ADD/DEL/MOD) -- TODO confirm */
	__u8   status;
	__u8   alias_len;		/* valid length of alias[] below */
	__u8   iqn_len;			/* valid length of iqn[] below */
	__be32 node_flowid;
	__be16 nodeid;
	__be16 login_retry;
	__be16 retry_timeout;
	__be16 r3;			/* reserved */
	__u8   iqn[224];		/* iSCSI qualified name; 224 ==
					 * FW_FOISCSI_NAME_MAX_LEN */
	__u8   alias[224];		/* node alias; 224 ==
					 * FW_FOISCSI_ALIAS_MAX_LEN */
	__be32 isid_tval_to_isid_cval;	/* packed ISID components, see the
					 * ISID_{T,A,B,C}VAL macros below */
};

#define S_FW_FOISCSI_NODE_WR_IMMDLEN	0
#define M_FW_FOISCSI_NODE_WR_IMMDLEN	0xffff
#define V_FW_FOISCSI_NODE_WR_IMMDLEN(x)	((x) << S_FW_FOISCSI_NODE_WR_IMMDLEN)
#define G_FW_FOISCSI_NODE_WR_IMMDLEN(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_IMMDLEN) & M_FW_FOISCSI_NODE_WR_IMMDLEN)

#define S_FW_FOISCSI_NODE_WR_NO_SESS_RECV	28
#define M_FW_FOISCSI_NODE_WR_NO_SESS_RECV	0x1
#define V_FW_FOISCSI_NODE_WR_NO_SESS_RECV(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_NO_SESS_RECV)
#define G_FW_FOISCSI_NODE_WR_NO_SESS_RECV(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_NO_SESS_RECV) & \
     M_FW_FOISCSI_NODE_WR_NO_SESS_RECV)
#define F_FW_FOISCSI_NODE_WR_NO_SESS_RECV	\
    V_FW_FOISCSI_NODE_WR_NO_SESS_RECV(1U)

/*
 * Accessors for the packed isid_tval_to_isid_cval word:
 * TVAL bits 31:30, AVAL bits 29:24, BVAL bits 23:8, CVAL bits 7:0.
 * NOTE(review): the field names suggest the iSCSI ISID t/a/b/c components
 * (RFC 7143) -- confirm against the firmware spec.
 */
#define S_FW_FOISCSI_NODE_WR_ISID_TVAL		30
#define M_FW_FOISCSI_NODE_WR_ISID_TVAL		0x3
#define V_FW_FOISCSI_NODE_WR_ISID_TVAL(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_ISID_TVAL)
#define G_FW_FOISCSI_NODE_WR_ISID_TVAL(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_TVAL) & M_FW_FOISCSI_NODE_WR_ISID_TVAL)

#define S_FW_FOISCSI_NODE_WR_ISID_AVAL		24
#define M_FW_FOISCSI_NODE_WR_ISID_AVAL		0x3f
#define V_FW_FOISCSI_NODE_WR_ISID_AVAL(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_ISID_AVAL)
#define G_FW_FOISCSI_NODE_WR_ISID_AVAL(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_AVAL) & M_FW_FOISCSI_NODE_WR_ISID_AVAL)

#define S_FW_FOISCSI_NODE_WR_ISID_BVAL		8
#define M_FW_FOISCSI_NODE_WR_ISID_BVAL		0xffff
#define V_FW_FOISCSI_NODE_WR_ISID_BVAL(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_ISID_BVAL)
#define G_FW_FOISCSI_NODE_WR_ISID_BVAL(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_BVAL) & M_FW_FOISCSI_NODE_WR_ISID_BVAL)

#define S_FW_FOISCSI_NODE_WR_ISID_CVAL		0
#define M_FW_FOISCSI_NODE_WR_ISID_CVAL		0xff
#define V_FW_FOISCSI_NODE_WR_ISID_CVAL(x)	\
    ((x) << S_FW_FOISCSI_NODE_WR_ISID_CVAL)
#define G_FW_FOISCSI_NODE_WR_ISID_CVAL(x)	\
    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_CVAL) & M_FW_FOISCSI_NODE_WR_ISID_CVAL)

/*
 * FOiSCSI control work request: carries session and connection attributes
 * for an iSCSI session/connection managed by the firmware.  The address
 * union holds either an IPv4 or IPv6 source/destination pair; which one is
 * valid is indicated by the IPV6 bit in hdigest_to_tcp_ws_en (see the
 * F_FW_FOISCSI_CTRL_WR_IPV6 macro below) -- TODO confirm against the
 * firmware spec.
 */
struct fw_foiscsi_ctrl_wr {
	__be32 op_to_no_fin;
	__be32 flowid_len16;
	__u64  cookie;			/* opaque value echoed back to the host */
	__u8   subop;
	__u8   status;
	__u8   ctrl_state;
	__u8   io_state;
	__be32 node_id;
	__be32 ctrl_id;
	__be32 io_id;
	struct fw_foiscsi_sess_attr {
		__be32 sess_type_to_erl;	/* packed: session type, ordering,
						 * R2T/immediate-data enables, ERL */
		__be16 max_conn;
		__be16 max_r2t;
		__be16 time2wait;
		__be16 time2retain;
		__be32 max_burst;
		__be32 first_burst;
		__be32 r1;			/* reserved */
	} sess_attr;
	struct fw_foiscsi_conn_attr {
		__be32 hdigest_to_tcp_ws_en;	/* packed: digests, auth, DDP page
						 * size/index, IPv6 flag, TCP window
						 * scaling */
		__be32 max_rcv_dsl;
		__be32 ping_tmo;
		__be16 dst_port;
		__be16 src_port;
		union fw_foiscsi_conn_attr_addr {
			struct fw_foiscsi_conn_attr_ipv6 {
				__be64 dst_addr[2];
				__be64 src_addr[2];
			} ipv6_addr;
			struct fw_foiscsi_conn_attr_ipv4 {
				__be32 dst_addr;
				__be32 src_addr;
			} ipv4_addr;
		} u;
	} conn_attr;
	__u8   tgt_name_len;		/* valid length of tgt_name[] below */
	__u8   r3[7];			/* reserved */
	__u8   tgt_name[FW_FOISCSI_NAME_MAX_LEN];
};

#define S_FW_FOISCSI_CTRL_WR_PORTID	1
#define M_FW_FOISCSI_CTRL_WR_PORTID	0x7
#define V_FW_FOISCSI_CTRL_WR_PORTID(x)	((x) << S_FW_FOISCSI_CTRL_WR_PORTID)
#define G_FW_FOISCSI_CTRL_WR_PORTID(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_PORTID) & M_FW_FOISCSI_CTRL_WR_PORTID)

#define S_FW_FOISCSI_CTRL_WR_NO_FIN	0
#define M_FW_FOISCSI_CTRL_WR_NO_FIN	0x1
#define V_FW_FOISCSI_CTRL_WR_NO_FIN(x)	((x) << S_FW_FOISCSI_CTRL_WR_NO_FIN)
#define G_FW_FOISCSI_CTRL_WR_NO_FIN(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_NO_FIN) & M_FW_FOISCSI_CTRL_WR_NO_FIN)
#define F_FW_FOISCSI_CTRL_WR_NO_FIN	V_FW_FOISCSI_CTRL_WR_NO_FIN(1U)

/* Accessors for fw_foiscsi_sess_attr.sess_type_to_erl (packed big-endian). */
#define S_FW_FOISCSI_CTRL_WR_SESS_TYPE		30
#define M_FW_FOISCSI_CTRL_WR_SESS_TYPE		0x3
#define V_FW_FOISCSI_CTRL_WR_SESS_TYPE(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_SESS_TYPE)
#define G_FW_FOISCSI_CTRL_WR_SESS_TYPE(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_SESS_TYPE) & M_FW_FOISCSI_CTRL_WR_SESS_TYPE)

#define S_FW_FOISCSI_CTRL_WR_SEQ_INORDER	29
#define M_FW_FOISCSI_CTRL_WR_SEQ_INORDER	0x1
#define V_FW_FOISCSI_CTRL_WR_SEQ_INORDER(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_SEQ_INORDER)
#define G_FW_FOISCSI_CTRL_WR_SEQ_INORDER(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_SEQ_INORDER) & \
     M_FW_FOISCSI_CTRL_WR_SEQ_INORDER)
#define F_FW_FOISCSI_CTRL_WR_SEQ_INORDER	\
    V_FW_FOISCSI_CTRL_WR_SEQ_INORDER(1U)

#define S_FW_FOISCSI_CTRL_WR_PDU_INORDER	28
#define M_FW_FOISCSI_CTRL_WR_PDU_INORDER	0x1
#define V_FW_FOISCSI_CTRL_WR_PDU_INORDER(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_PDU_INORDER)
#define G_FW_FOISCSI_CTRL_WR_PDU_INORDER(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_PDU_INORDER) & \
     M_FW_FOISCSI_CTRL_WR_PDU_INORDER)
#define F_FW_FOISCSI_CTRL_WR_PDU_INORDER	\
    V_FW_FOISCSI_CTRL_WR_PDU_INORDER(1U)

#define S_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN	27
#define M_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN	0x1
#define V_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN)
#define G_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN) & \
     M_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN)
#define F_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN	\
    V_FW_FOISCSI_CTRL_WR_IMMD_DATA_EN(1U)

#define S_FW_FOISCSI_CTRL_WR_INIT_R2T_EN	26
#define M_FW_FOISCSI_CTRL_WR_INIT_R2T_EN	0x1
#define V_FW_FOISCSI_CTRL_WR_INIT_R2T_EN(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_INIT_R2T_EN)
#define G_FW_FOISCSI_CTRL_WR_INIT_R2T_EN(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_INIT_R2T_EN) & \
     M_FW_FOISCSI_CTRL_WR_INIT_R2T_EN)
#define F_FW_FOISCSI_CTRL_WR_INIT_R2T_EN	\
    V_FW_FOISCSI_CTRL_WR_INIT_R2T_EN(1U)

#define S_FW_FOISCSI_CTRL_WR_ERL	24
#define M_FW_FOISCSI_CTRL_WR_ERL	0x3
#define V_FW_FOISCSI_CTRL_WR_ERL(x)	((x) << S_FW_FOISCSI_CTRL_WR_ERL)
#define G_FW_FOISCSI_CTRL_WR_ERL(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_ERL) & M_FW_FOISCSI_CTRL_WR_ERL)

/*
 * Accessors for fw_foiscsi_conn_attr.hdigest_to_tcp_ws_en (packed
 * big-endian).
 */
#define S_FW_FOISCSI_CTRL_WR_HDIGEST	30
#define M_FW_FOISCSI_CTRL_WR_HDIGEST	0x3
#define V_FW_FOISCSI_CTRL_WR_HDIGEST(x)	((x) << S_FW_FOISCSI_CTRL_WR_HDIGEST)
#define G_FW_FOISCSI_CTRL_WR_HDIGEST(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_HDIGEST) & M_FW_FOISCSI_CTRL_WR_HDIGEST)

#define S_FW_FOISCSI_CTRL_WR_DDIGEST	28
#define M_FW_FOISCSI_CTRL_WR_DDIGEST	0x3
#define V_FW_FOISCSI_CTRL_WR_DDIGEST(x)	((x) << S_FW_FOISCSI_CTRL_WR_DDIGEST)
#define G_FW_FOISCSI_CTRL_WR_DDIGEST(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_DDIGEST) & M_FW_FOISCSI_CTRL_WR_DDIGEST)

#define S_FW_FOISCSI_CTRL_WR_AUTH_METHOD	25
#define M_FW_FOISCSI_CTRL_WR_AUTH_METHOD	0x7
#define V_FW_FOISCSI_CTRL_WR_AUTH_METHOD(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_AUTH_METHOD)
#define G_FW_FOISCSI_CTRL_WR_AUTH_METHOD(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_AUTH_METHOD) & \
     M_FW_FOISCSI_CTRL_WR_AUTH_METHOD)

#define S_FW_FOISCSI_CTRL_WR_AUTH_POLICY	23
#define M_FW_FOISCSI_CTRL_WR_AUTH_POLICY	0x3
#define V_FW_FOISCSI_CTRL_WR_AUTH_POLICY(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_AUTH_POLICY)
#define G_FW_FOISCSI_CTRL_WR_AUTH_POLICY(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_AUTH_POLICY) & \
     M_FW_FOISCSI_CTRL_WR_AUTH_POLICY)

#define S_FW_FOISCSI_CTRL_WR_DDP_PGSZ	21
#define M_FW_FOISCSI_CTRL_WR_DDP_PGSZ	0x3
#define V_FW_FOISCSI_CTRL_WR_DDP_PGSZ(x)	\
    ((x) << S_FW_FOISCSI_CTRL_WR_DDP_PGSZ)
#define G_FW_FOISCSI_CTRL_WR_DDP_PGSZ(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_DDP_PGSZ) & M_FW_FOISCSI_CTRL_WR_DDP_PGSZ)

#define S_FW_FOISCSI_CTRL_WR_IPV6	20
#define M_FW_FOISCSI_CTRL_WR_IPV6	0x1
#define V_FW_FOISCSI_CTRL_WR_IPV6(x)	((x) << S_FW_FOISCSI_CTRL_WR_IPV6)
#define G_FW_FOISCSI_CTRL_WR_IPV6(x)	\
    (((x) >> S_FW_FOISCSI_CTRL_WR_IPV6) & M_FW_FOISCSI_CTRL_WR_IPV6)
#define F_FW_FOISCSI_CTRL_WR_IPV6	V_FW_FOISCSI_CTRL_WR_IPV6(1U)

#define S_FW_FOISCSI_CTRL_WR_DDP_PGIDX	16
#define M_FW_FOISCSI_CTRL_WR_DDP_PGIDX	0xf
#define V_FW_FOISCSI_CTRL_WR_DDP_PGIDX(x) \
    ((x) << S_FW_FOISCSI_CTRL_WR_DDP_PGIDX)
#define G_FW_FOISCSI_CTRL_WR_DDP_PGIDX(x) \
    (((x) >> S_FW_FOISCSI_CTRL_WR_DDP_PGIDX) & M_FW_FOISCSI_CTRL_WR_DDP_PGIDX)

/* TCP window scale value and enable for the FOiSCSI connection */
#define S_FW_FOISCSI_CTRL_WR_TCP_WS 12
#define M_FW_FOISCSI_CTRL_WR_TCP_WS 0xf
#define V_FW_FOISCSI_CTRL_WR_TCP_WS(x) ((x) << S_FW_FOISCSI_CTRL_WR_TCP_WS)
#define G_FW_FOISCSI_CTRL_WR_TCP_WS(x) \
    (((x) >> S_FW_FOISCSI_CTRL_WR_TCP_WS) & M_FW_FOISCSI_CTRL_WR_TCP_WS)

#define S_FW_FOISCSI_CTRL_WR_TCP_WS_EN 11
#define M_FW_FOISCSI_CTRL_WR_TCP_WS_EN 0x1
#define V_FW_FOISCSI_CTRL_WR_TCP_WS_EN(x) \
    ((x) << S_FW_FOISCSI_CTRL_WR_TCP_WS_EN)
#define G_FW_FOISCSI_CTRL_WR_TCP_WS_EN(x) \
    (((x) >> S_FW_FOISCSI_CTRL_WR_TCP_WS_EN) & M_FW_FOISCSI_CTRL_WR_TCP_WS_EN)
#define F_FW_FOISCSI_CTRL_WR_TCP_WS_EN V_FW_FOISCSI_CTRL_WR_TCP_WS_EN(1U)

/*
 * FOiSCSI CHAP / vendor key-value WR.  The lenu union carries either CHAP
 * id/secret lengths or vendor key/value lengths; which arm is valid is
 * selected by the KV_FLAG bit in op_to_kv_flag (accessors below).  The
 * payload union u mirrors that choice.  Fields named r* are reserved.
 */
struct fw_foiscsi_chap_wr {
	__be32 op_to_kv_flag;
	__be32 flowid_len16;
	__u64 cookie;
	__u8 status;
	union fw_foiscsi_len {
		struct fw_foiscsi_chap_lens {
			__u8 id_len;
			__u8 sec_len;
		} chapl;
		struct fw_foiscsi_vend_kv_lens {
			__u8 key_len;
			__u8 val_len;
		} vend_kvl;
	} lenu;
	__u8 node_type;
	__be16 node_id;
	__u8 r3[2];
	union fw_foiscsi_chap_vend {
		struct fw_foiscsi_chap {
			__u8 chap_id[224];
			__u8 chap_sec[128];
		} chap;
		struct fw_foiscsi_vend_kv {
			__u8 vend_key[64];
			__u8 vend_val[256];
		} vend_kv;
	} u;
};

#define S_FW_FOISCSI_CHAP_WR_KV_FLAG 20
#define M_FW_FOISCSI_CHAP_WR_KV_FLAG 0x1
#define V_FW_FOISCSI_CHAP_WR_KV_FLAG(x) ((x) << S_FW_FOISCSI_CHAP_WR_KV_FLAG)
#define G_FW_FOISCSI_CHAP_WR_KV_FLAG(x) \
    (((x) >> S_FW_FOISCSI_CHAP_WR_KV_FLAG) & M_FW_FOISCSI_CHAP_WR_KV_FLAG)
#define F_FW_FOISCSI_CHAP_WR_KV_FLAG V_FW_FOISCSI_CHAP_WR_KV_FLAG(1U)

/******************************************************************************
 *	C O i S C S I  W O R K  R E Q U E S T S
 ********************************************/

/*
 * NOTE(review): "ADDD" in FW_CHNET_ADDD_TYPE_NONE looks like a typo for
 * "ADDR", but the name is part of the driver/firmware interface and must
 * not be renamed here.
 */
enum fw_chnet_addr_type {
	FW_CHNET_ADDD_TYPE_NONE = 0,
	FW_CHNET_ADDR_TYPE_IPV4,
	FW_CHNET_ADDR_TYPE_IPV6,
};

enum fw_msg_wr_type {
	FW_MSG_WR_TYPE_RPL = 0,
	FW_MSG_WR_TYPE_ERR,
	FW_MSG_WR_TYPE_PLD,
};

/*
 * CoiSCSI target WR: carries a connection-attribute block with an
 * IPv4/IPv6 address union.  Fields named r* are reserved.
 */
struct fw_coiscsi_tgt_wr {
	__be32 op_compl;
	__be32 flowid_len16;
	__u64 cookie;
	__u8 subop;
	__u8 status;
	__be16 r4;
	__be32 flags;
	struct fw_coiscsi_tgt_conn_attr {
		__be32 in_tid;
		__be16 in_port;
		__u8 in_type;
		__u8 r6;
		union fw_coiscsi_tgt_conn_attr_addr {
			struct fw_coiscsi_tgt_conn_attr_in_addr {
				__be32 addr;
				__be32 r7;
				__be32 r8[2];
			} in_addr;
			struct fw_coiscsi_tgt_conn_attr_in_addr6 {
				__be64 addr[2];
			} in_addr6;
		} u;
	} conn_attr;
};

#define S_FW_COISCSI_TGT_WR_PORTID 0
#define M_FW_COISCSI_TGT_WR_PORTID 0x7
#define V_FW_COISCSI_TGT_WR_PORTID(x) ((x) << S_FW_COISCSI_TGT_WR_PORTID)
#define G_FW_COISCSI_TGT_WR_PORTID(x) \
    (((x) >> S_FW_COISCSI_TGT_WR_PORTID) & M_FW_COISCSI_TGT_WR_PORTID)

/*
 * CoiSCSI target connection WR.  The anonymous union u holds either the
 * TCP connection parameters (conn_tcp) or DDP statistics (stats); the
 * iSCSI session parameters follow in conn_iscsi.
 */
struct fw_coiscsi_tgt_conn_wr {
	__be32 op_compl;
	__be32 flowid_len16;
	__u64 cookie;
	__u8 subop;
	__u8 status;
	__be16 iq_id;
	__be32 in_stid;
	__be32 io_id;
	__be32 flags_fin;
	union {
		struct fw_coiscsi_tgt_conn_tcp {
			__be16 in_sport;
			__be16 in_dport;
			__u8 wscale_wsen;
			__u8 r4[3];
			union fw_coiscsi_tgt_conn_tcp_addr {
				struct fw_coiscsi_tgt_conn_tcp_in_addr {
					__be32 saddr;
					__be32 daddr;
				} in_addr;
				struct fw_coiscsi_tgt_conn_tcp_in_addr6 {
					__be64 saddr[2];
					__be64 daddr[2];
				} in_addr6;
			} u;
		} conn_tcp;
		struct fw_coiscsi_tgt_conn_stats {
			__be32 ddp_reqs;
			__be32 ddp_cmpls;
			__be16 ddp_aborts;
			__be16 ddp_bps;
		} stats;
	} u;
	struct fw_coiscsi_tgt_conn_iscsi {
		__be32 hdigest_to_ddp_pgsz;
		__be32 tgt_id;
		__be16 max_r2t;
		__be16 r5;
		__be32 max_burst;
		__be32 max_rdsl;
		__be32 max_tdsl;
		__be32 cur_sn;
		__be32 r6;
	} conn_iscsi;
};

#define S_FW_COISCSI_TGT_CONN_WR_PORTID 0
#define M_FW_COISCSI_TGT_CONN_WR_PORTID 0x7
#define V_FW_COISCSI_TGT_CONN_WR_PORTID(x) \
    ((x) << S_FW_COISCSI_TGT_CONN_WR_PORTID)
#define G_FW_COISCSI_TGT_CONN_WR_PORTID(x) \
    (((x) >> S_FW_COISCSI_TGT_CONN_WR_PORTID) & \
     M_FW_COISCSI_TGT_CONN_WR_PORTID)

#define S_FW_COISCSI_TGT_CONN_WR_FIN 0
#define M_FW_COISCSI_TGT_CONN_WR_FIN 0x1
#define V_FW_COISCSI_TGT_CONN_WR_FIN(x) ((x) << S_FW_COISCSI_TGT_CONN_WR_FIN)
#define G_FW_COISCSI_TGT_CONN_WR_FIN(x) \
    (((x) >> S_FW_COISCSI_TGT_CONN_WR_FIN) & M_FW_COISCSI_TGT_CONN_WR_FIN)
#define F_FW_COISCSI_TGT_CONN_WR_FIN V_FW_COISCSI_TGT_CONN_WR_FIN(1U)

/* Window-scale value/enable packed in the wscale_wsen byte */
#define S_FW_COISCSI_TGT_CONN_WR_WSCALE 1
#define M_FW_COISCSI_TGT_CONN_WR_WSCALE 0xf
#define V_FW_COISCSI_TGT_CONN_WR_WSCALE(x) \
    ((x) << S_FW_COISCSI_TGT_CONN_WR_WSCALE)
#define G_FW_COISCSI_TGT_CONN_WR_WSCALE(x) \
    (((x) >> S_FW_COISCSI_TGT_CONN_WR_WSCALE) & \
     M_FW_COISCSI_TGT_CONN_WR_WSCALE)

#define S_FW_COISCSI_TGT_CONN_WR_WSEN 0
#define M_FW_COISCSI_TGT_CONN_WR_WSEN 0x1
#define V_FW_COISCSI_TGT_CONN_WR_WSEN(x) \
    ((x) << S_FW_COISCSI_TGT_CONN_WR_WSEN)
#define G_FW_COISCSI_TGT_CONN_WR_WSEN(x) \
    (((x) >> S_FW_COISCSI_TGT_CONN_WR_WSEN) & M_FW_COISCSI_TGT_CONN_WR_WSEN)
#define F_FW_COISCSI_TGT_CONN_WR_WSEN V_FW_COISCSI_TGT_CONN_WR_WSEN(1U)

/*
 * CoiSCSI target transmit WR.  The first union selects completion status
 * vs. flowid/len16; the trailing union cu carries a tag, DataSN, or DDP
 * status depending on usage.
 */
struct fw_coiscsi_tgt_xmit_wr {
	__be32 op_to_immdlen;
	union {
		struct cmpl_stat {
			__be32 cmpl_status_pkd;
		} cs;
		struct flowid_len {
			__be32 flowid_len16;
		} fllen;
	} u;
	__u64 cookie;
	__be16 iq_id;
	__be16 r3;
	__be32 pz_off;
	__be32 t_xfer_len;
	union {
		__be32 tag;
		__be32 datasn;
		__be32 ddp_status;
	} cu;
};

#define S_FW_COISCSI_TGT_XMIT_WR_DDGST 23
#define M_FW_COISCSI_TGT_XMIT_WR_DDGST 0x1
#define V_FW_COISCSI_TGT_XMIT_WR_DDGST(x) \
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_DDGST)
#define G_FW_COISCSI_TGT_XMIT_WR_DDGST(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_DDGST) & M_FW_COISCSI_TGT_XMIT_WR_DDGST)
#define F_FW_COISCSI_TGT_XMIT_WR_DDGST V_FW_COISCSI_TGT_XMIT_WR_DDGST(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_HDGST 22
#define M_FW_COISCSI_TGT_XMIT_WR_HDGST 0x1
#define V_FW_COISCSI_TGT_XMIT_WR_HDGST(x) \
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_HDGST)
#define G_FW_COISCSI_TGT_XMIT_WR_HDGST(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_HDGST) & M_FW_COISCSI_TGT_XMIT_WR_HDGST)
#define F_FW_COISCSI_TGT_XMIT_WR_HDGST V_FW_COISCSI_TGT_XMIT_WR_HDGST(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_DDP 20
#define M_FW_COISCSI_TGT_XMIT_WR_DDP 0x1
#define V_FW_COISCSI_TGT_XMIT_WR_DDP(x) ((x) << S_FW_COISCSI_TGT_XMIT_WR_DDP)
#define G_FW_COISCSI_TGT_XMIT_WR_DDP(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_DDP) & M_FW_COISCSI_TGT_XMIT_WR_DDP)
#define F_FW_COISCSI_TGT_XMIT_WR_DDP V_FW_COISCSI_TGT_XMIT_WR_DDP(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_ABORT 19
#define M_FW_COISCSI_TGT_XMIT_WR_ABORT 0x1
#define V_FW_COISCSI_TGT_XMIT_WR_ABORT(x) \
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_ABORT)
#define G_FW_COISCSI_TGT_XMIT_WR_ABORT(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_ABORT) & M_FW_COISCSI_TGT_XMIT_WR_ABORT)
#define F_FW_COISCSI_TGT_XMIT_WR_ABORT V_FW_COISCSI_TGT_XMIT_WR_ABORT(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_FINAL 18
#define M_FW_COISCSI_TGT_XMIT_WR_FINAL 0x1
#define V_FW_COISCSI_TGT_XMIT_WR_FINAL(x) \
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_FINAL)
#define G_FW_COISCSI_TGT_XMIT_WR_FINAL(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_FINAL) & M_FW_COISCSI_TGT_XMIT_WR_FINAL)
#define F_FW_COISCSI_TGT_XMIT_WR_FINAL V_FW_COISCSI_TGT_XMIT_WR_FINAL(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_PADLEN 16
#define M_FW_COISCSI_TGT_XMIT_WR_PADLEN 0x3
#define V_FW_COISCSI_TGT_XMIT_WR_PADLEN(x) \
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_PADLEN)
#define G_FW_COISCSI_TGT_XMIT_WR_PADLEN(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_PADLEN) & \
     M_FW_COISCSI_TGT_XMIT_WR_PADLEN)

#define S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN 15
#define M_FW_COISCSI_TGT_XMIT_WR_INCSTATSN 0x1
#define V_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(x) \
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN)
#define G_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN) & \
     M_FW_COISCSI_TGT_XMIT_WR_INCSTATSN)
#define F_FW_COISCSI_TGT_XMIT_WR_INCSTATSN \
    V_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(1U)

#define S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN 0
#define M_FW_COISCSI_TGT_XMIT_WR_IMMDLEN 0xff
#define V_FW_COISCSI_TGT_XMIT_WR_IMMDLEN(x) \
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN)
#define G_FW_COISCSI_TGT_XMIT_WR_IMMDLEN(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN) & \
     M_FW_COISCSI_TGT_XMIT_WR_IMMDLEN)

#define S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS 8
#define M_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS 0xff
#define V_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS(x) \
    ((x) << S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS)
#define G_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS(x) \
    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS) & \
     M_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS)

/* CoiSCSI statistics WR; rsrc reports firmware resource counts. */
struct fw_coiscsi_stats_wr {
	__be32 op_compl;
	__be32 flowid_len16;
	__u64 cookie;
	__u8 subop;
	__u8 status;
	union fw_coiscsi_stats {
		struct fw_coiscsi_resource {
			__u8 num_ipv4_tgt;
			__u8 num_ipv6_tgt;
			__be16 num_l2t_entries;
			__be16 num_csocks;
			__be16 num_tasks;
			__be16 num_ppods_zone[11];
			__be32 num_bufll64;
			__u8 r2[12];
		} rsrc;
	} u;
};

#define S_FW_COISCSI_STATS_WR_PORTID 0
#define M_FW_COISCSI_STATS_WR_PORTID 0x7
#define V_FW_COISCSI_STATS_WR_PORTID(x) ((x) << S_FW_COISCSI_STATS_WR_PORTID)
#define G_FW_COISCSI_STATS_WR_PORTID(x) \
    (((x) >> S_FW_COISCSI_STATS_WR_PORTID) & M_FW_COISCSI_STATS_WR_PORTID)

/* iSNS WR: TCP connection attributes with IPv4/IPv6 address union. */
struct fw_isns_wr {
	__be32 op_compl;
	__be32 flowid_len16;
	__u64 cookie;
	__u8 subop;
	__u8 status;
	__be16 iq_id;
	__be16 vlanid;
	__be16 r4;
	struct fw_tcp_conn_attr {
		__be32 in_tid;
		__be16 in_port;
		__u8 in_type;
		__u8 r6;
		union fw_tcp_conn_attr_addr {
			struct fw_tcp_conn_attr_in_addr {
				__be32 addr;
				__be32 r7;
				__be32 r8[2];
			} in_addr;
			struct fw_tcp_conn_attr_in_addr6 {
				__be64 addr[2];
			} in_addr6;
		} u;
	} conn_attr;
};

#define S_FW_ISNS_WR_PORTID 0
#define M_FW_ISNS_WR_PORTID 0x7
#define V_FW_ISNS_WR_PORTID(x) ((x) << S_FW_ISNS_WR_PORTID)
#define G_FW_ISNS_WR_PORTID(x) \
    (((x) >> S_FW_ISNS_WR_PORTID) & M_FW_ISNS_WR_PORTID)

/* iSNS transmit WR. */
struct fw_isns_xmit_wr {
	__be32 op_to_immdlen;
	__be32 flowid_len16;
	__u64 cookie;
	__be16 iq_id;
	__be16 r4;
	__be32 xfer_len;
	__be64 r5;
};

#define S_FW_ISNS_XMIT_WR_IMMDLEN 0
#define M_FW_ISNS_XMIT_WR_IMMDLEN 0xff
#define V_FW_ISNS_XMIT_WR_IMMDLEN(x) ((x) << S_FW_ISNS_XMIT_WR_IMMDLEN)
#define G_FW_ISNS_XMIT_WR_IMMDLEN(x) \
    (((x) >> S_FW_ISNS_XMIT_WR_IMMDLEN) & M_FW_ISNS_XMIT_WR_IMMDLEN)
/******************************************************************************
 *	F O F C O E   W O R K R E Q U E S T s
 *******************************************/

/* FCoE ELS/CT WR: carries local/remote FC IDs and a response DMA buffer. */
struct fw_fcoe_els_ct_wr {
	__be32 op_immdlen;
	__be32 flowid_len16;
	__be64 cookie;
	__be16 iqid;
	__u8 tmo_val;
	__u8 els_ct_type;
	__u8 ctl_pri;
	__u8 cp_en_class;
	__be16 xfer_cnt;
	__u8 fl_to_sp;
	__u8 l_id[3];
	__u8 r5;
	__u8 r_id[3];
	__be64 rsp_dmaaddr;
	__be32 rsp_dmalen;
	__be32 r6;
};

#define S_FW_FCOE_ELS_CT_WR_OPCODE 24
#define M_FW_FCOE_ELS_CT_WR_OPCODE 0xff
#define V_FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << S_FW_FCOE_ELS_CT_WR_OPCODE)
#define G_FW_FCOE_ELS_CT_WR_OPCODE(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_OPCODE) & M_FW_FCOE_ELS_CT_WR_OPCODE)

#define S_FW_FCOE_ELS_CT_WR_IMMDLEN 0
#define M_FW_FCOE_ELS_CT_WR_IMMDLEN 0xff
#define V_FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << S_FW_FCOE_ELS_CT_WR_IMMDLEN)
#define G_FW_FCOE_ELS_CT_WR_IMMDLEN(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_IMMDLEN) & M_FW_FCOE_ELS_CT_WR_IMMDLEN)

#define S_FW_FCOE_ELS_CT_WR_FLOWID 8
#define M_FW_FCOE_ELS_CT_WR_FLOWID 0xfffff
#define V_FW_FCOE_ELS_CT_WR_FLOWID(x) ((x) << S_FW_FCOE_ELS_CT_WR_FLOWID)
#define G_FW_FCOE_ELS_CT_WR_FLOWID(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_FLOWID) & M_FW_FCOE_ELS_CT_WR_FLOWID)

#define S_FW_FCOE_ELS_CT_WR_LEN16 0
#define M_FW_FCOE_ELS_CT_WR_LEN16 0xff
#define V_FW_FCOE_ELS_CT_WR_LEN16(x) ((x) << S_FW_FCOE_ELS_CT_WR_LEN16)
#define G_FW_FCOE_ELS_CT_WR_LEN16(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_LEN16) & M_FW_FCOE_ELS_CT_WR_LEN16)

#define S_FW_FCOE_ELS_CT_WR_CP_EN 6
#define M_FW_FCOE_ELS_CT_WR_CP_EN 0x3
#define V_FW_FCOE_ELS_CT_WR_CP_EN(x) ((x) << S_FW_FCOE_ELS_CT_WR_CP_EN)
#define G_FW_FCOE_ELS_CT_WR_CP_EN(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_CP_EN) & M_FW_FCOE_ELS_CT_WR_CP_EN)

#define S_FW_FCOE_ELS_CT_WR_CLASS 4
#define M_FW_FCOE_ELS_CT_WR_CLASS 0x3
#define V_FW_FCOE_ELS_CT_WR_CLASS(x) ((x) << S_FW_FCOE_ELS_CT_WR_CLASS)
#define G_FW_FCOE_ELS_CT_WR_CLASS(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_CLASS) & M_FW_FCOE_ELS_CT_WR_CLASS)

#define S_FW_FCOE_ELS_CT_WR_FL 2
#define M_FW_FCOE_ELS_CT_WR_FL 0x1
#define V_FW_FCOE_ELS_CT_WR_FL(x) ((x) << S_FW_FCOE_ELS_CT_WR_FL)
#define G_FW_FCOE_ELS_CT_WR_FL(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_FL) & M_FW_FCOE_ELS_CT_WR_FL)
#define F_FW_FCOE_ELS_CT_WR_FL V_FW_FCOE_ELS_CT_WR_FL(1U)

#define S_FW_FCOE_ELS_CT_WR_NPIV 1
#define M_FW_FCOE_ELS_CT_WR_NPIV 0x1
#define V_FW_FCOE_ELS_CT_WR_NPIV(x) ((x) << S_FW_FCOE_ELS_CT_WR_NPIV)
#define G_FW_FCOE_ELS_CT_WR_NPIV(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_NPIV) & M_FW_FCOE_ELS_CT_WR_NPIV)
#define F_FW_FCOE_ELS_CT_WR_NPIV V_FW_FCOE_ELS_CT_WR_NPIV(1U)

#define S_FW_FCOE_ELS_CT_WR_SP 0
#define M_FW_FCOE_ELS_CT_WR_SP 0x1
#define V_FW_FCOE_ELS_CT_WR_SP(x) ((x) << S_FW_FCOE_ELS_CT_WR_SP)
#define G_FW_FCOE_ELS_CT_WR_SP(x) \
    (((x) >> S_FW_FCOE_ELS_CT_WR_SP) & M_FW_FCOE_ELS_CT_WR_SP)
#define F_FW_FCOE_ELS_CT_WR_SP V_FW_FCOE_ELS_CT_WR_SP(1U)

/******************************************************************************
 *	S C S I   W O R K R E Q U E S T s   (FOiSCSI and FCOE unified data path)
 *****************************************************************************/

/*
 * Unified SCSI write WR.  The priv union carries transport-specific bytes:
 * FCoE control/priority and class, or reserved space for iSCSI.
 */
struct fw_scsi_write_wr {
	__be32 op_immdlen;
	__be32 flowid_len16;
	__be64 cookie;
	__be16 iqid;
	__u8 tmo_val;
	__u8 use_xfer_cnt;
	union fw_scsi_write_priv {
		struct fcoe_write_priv {
			__u8 ctl_pri;
			__u8 cp_en_class;
			__u8 r3_lo[2];
		} fcoe;
		struct iscsi_write_priv {
			__u8 r3[4];
		} iscsi;
	} u;
	__be32 xfer_cnt;
	__be32 ini_xfer_cnt;
	__be64 rsp_dmaaddr;
	__be32 rsp_dmalen;
	__be32 r4;
};

#define S_FW_SCSI_WRITE_WR_OPCODE 24
#define M_FW_SCSI_WRITE_WR_OPCODE 0xff
#define V_FW_SCSI_WRITE_WR_OPCODE(x) ((x) << S_FW_SCSI_WRITE_WR_OPCODE)
#define G_FW_SCSI_WRITE_WR_OPCODE(x) \
    (((x) >> S_FW_SCSI_WRITE_WR_OPCODE) & M_FW_SCSI_WRITE_WR_OPCODE)

#define S_FW_SCSI_WRITE_WR_IMMDLEN 0
#define M_FW_SCSI_WRITE_WR_IMMDLEN 0xff
#define V_FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << S_FW_SCSI_WRITE_WR_IMMDLEN)
#define G_FW_SCSI_WRITE_WR_IMMDLEN(x) \
    (((x) >> S_FW_SCSI_WRITE_WR_IMMDLEN) & M_FW_SCSI_WRITE_WR_IMMDLEN)

#define S_FW_SCSI_WRITE_WR_FLOWID 8
#define M_FW_SCSI_WRITE_WR_FLOWID 0xfffff
#define V_FW_SCSI_WRITE_WR_FLOWID(x) ((x) << S_FW_SCSI_WRITE_WR_FLOWID)
#define G_FW_SCSI_WRITE_WR_FLOWID(x) \
    (((x) >> S_FW_SCSI_WRITE_WR_FLOWID) & M_FW_SCSI_WRITE_WR_FLOWID)

#define S_FW_SCSI_WRITE_WR_LEN16 0
#define M_FW_SCSI_WRITE_WR_LEN16 0xff
#define V_FW_SCSI_WRITE_WR_LEN16(x) ((x) << S_FW_SCSI_WRITE_WR_LEN16)
#define G_FW_SCSI_WRITE_WR_LEN16(x) \
    (((x) >> S_FW_SCSI_WRITE_WR_LEN16) & M_FW_SCSI_WRITE_WR_LEN16)

#define S_FW_SCSI_WRITE_WR_CP_EN 6
#define M_FW_SCSI_WRITE_WR_CP_EN 0x3
#define V_FW_SCSI_WRITE_WR_CP_EN(x) ((x) << S_FW_SCSI_WRITE_WR_CP_EN)
#define G_FW_SCSI_WRITE_WR_CP_EN(x) \
    (((x) >> S_FW_SCSI_WRITE_WR_CP_EN) & M_FW_SCSI_WRITE_WR_CP_EN)

#define S_FW_SCSI_WRITE_WR_CLASS 4
#define M_FW_SCSI_WRITE_WR_CLASS 0x3
#define V_FW_SCSI_WRITE_WR_CLASS(x) ((x) << S_FW_SCSI_WRITE_WR_CLASS)
#define G_FW_SCSI_WRITE_WR_CLASS(x) \
    (((x) >> S_FW_SCSI_WRITE_WR_CLASS) & M_FW_SCSI_WRITE_WR_CLASS)

/* Unified SCSI read WR; layout mirrors fw_scsi_write_wr. */
struct fw_scsi_read_wr {
	__be32 op_immdlen;
	__be32 flowid_len16;
	__be64 cookie;
	__be16 iqid;
	__u8 tmo_val;
	__u8 use_xfer_cnt;
	union fw_scsi_read_priv {
		struct fcoe_read_priv {
			__u8 ctl_pri;
			__u8 cp_en_class;
			__u8 r3_lo[2];
		} fcoe;
		struct iscsi_read_priv {
			__u8 r3[4];
		} iscsi;
	} u;
	__be32 xfer_cnt;
	__be32 ini_xfer_cnt;
	__be64 rsp_dmaaddr;
	__be32 rsp_dmalen;
	__be32 r4;
};

#define S_FW_SCSI_READ_WR_OPCODE 24
#define M_FW_SCSI_READ_WR_OPCODE 0xff
#define V_FW_SCSI_READ_WR_OPCODE(x) ((x) << S_FW_SCSI_READ_WR_OPCODE)
#define G_FW_SCSI_READ_WR_OPCODE(x) \
    (((x) >> S_FW_SCSI_READ_WR_OPCODE) & M_FW_SCSI_READ_WR_OPCODE)

#define S_FW_SCSI_READ_WR_IMMDLEN 0
#define M_FW_SCSI_READ_WR_IMMDLEN 0xff
#define V_FW_SCSI_READ_WR_IMMDLEN(x) ((x) << S_FW_SCSI_READ_WR_IMMDLEN)
#define G_FW_SCSI_READ_WR_IMMDLEN(x) \
    (((x) >> S_FW_SCSI_READ_WR_IMMDLEN) & M_FW_SCSI_READ_WR_IMMDLEN)

#define S_FW_SCSI_READ_WR_FLOWID 8
#define M_FW_SCSI_READ_WR_FLOWID 0xfffff
#define V_FW_SCSI_READ_WR_FLOWID(x) ((x) << S_FW_SCSI_READ_WR_FLOWID)
#define G_FW_SCSI_READ_WR_FLOWID(x) \
    (((x) >> S_FW_SCSI_READ_WR_FLOWID) & M_FW_SCSI_READ_WR_FLOWID)

#define S_FW_SCSI_READ_WR_LEN16 0
#define M_FW_SCSI_READ_WR_LEN16 0xff
#define V_FW_SCSI_READ_WR_LEN16(x) ((x) << S_FW_SCSI_READ_WR_LEN16)
#define G_FW_SCSI_READ_WR_LEN16(x) \
    (((x) >> S_FW_SCSI_READ_WR_LEN16) & M_FW_SCSI_READ_WR_LEN16)

#define S_FW_SCSI_READ_WR_CP_EN 6
#define M_FW_SCSI_READ_WR_CP_EN 0x3
#define V_FW_SCSI_READ_WR_CP_EN(x) ((x) << S_FW_SCSI_READ_WR_CP_EN)
#define G_FW_SCSI_READ_WR_CP_EN(x) \
    (((x) >> S_FW_SCSI_READ_WR_CP_EN) & M_FW_SCSI_READ_WR_CP_EN)

#define S_FW_SCSI_READ_WR_CLASS 4
#define M_FW_SCSI_READ_WR_CLASS 0x3
#define V_FW_SCSI_READ_WR_CLASS(x) ((x) << S_FW_SCSI_READ_WR_CLASS)
#define G_FW_SCSI_READ_WR_CLASS(x) \
    (((x) >> S_FW_SCSI_READ_WR_CLASS) & M_FW_SCSI_READ_WR_CLASS)

/* Unified SCSI command (non-data) WR. */
struct fw_scsi_cmd_wr {
	__be32 op_immdlen;
	__be32 flowid_len16;
	__be64 cookie;
	__be16 iqid;
	__u8 tmo_val;
	__u8 r3;
	union fw_scsi_cmd_priv {
		struct fcoe_cmd_priv {
			__u8 ctl_pri;
			__u8 cp_en_class;
			__u8 r4_lo[2];
		} fcoe;
		struct iscsi_cmd_priv {
			__u8 r4[4];
		} iscsi;
	} u;
	__u8 r5[8];
	__be64 rsp_dmaaddr;
	__be32 rsp_dmalen;
	__be32 r6;
};

#define S_FW_SCSI_CMD_WR_OPCODE 24
#define M_FW_SCSI_CMD_WR_OPCODE 0xff
#define V_FW_SCSI_CMD_WR_OPCODE(x) ((x) << S_FW_SCSI_CMD_WR_OPCODE)
#define G_FW_SCSI_CMD_WR_OPCODE(x) \
    (((x) >> S_FW_SCSI_CMD_WR_OPCODE) & M_FW_SCSI_CMD_WR_OPCODE)

#define S_FW_SCSI_CMD_WR_IMMDLEN 0
#define M_FW_SCSI_CMD_WR_IMMDLEN 0xff
#define V_FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << S_FW_SCSI_CMD_WR_IMMDLEN)
#define G_FW_SCSI_CMD_WR_IMMDLEN(x) \
    (((x) >> S_FW_SCSI_CMD_WR_IMMDLEN) & M_FW_SCSI_CMD_WR_IMMDLEN)

#define S_FW_SCSI_CMD_WR_FLOWID 8
#define M_FW_SCSI_CMD_WR_FLOWID 0xfffff
#define V_FW_SCSI_CMD_WR_FLOWID(x) ((x) << S_FW_SCSI_CMD_WR_FLOWID)
#define G_FW_SCSI_CMD_WR_FLOWID(x) \
    (((x) >> S_FW_SCSI_CMD_WR_FLOWID) & M_FW_SCSI_CMD_WR_FLOWID)

#define S_FW_SCSI_CMD_WR_LEN16 0
#define M_FW_SCSI_CMD_WR_LEN16 0xff
#define V_FW_SCSI_CMD_WR_LEN16(x) ((x) << S_FW_SCSI_CMD_WR_LEN16)
#define G_FW_SCSI_CMD_WR_LEN16(x) \
    (((x) >> S_FW_SCSI_CMD_WR_LEN16) & M_FW_SCSI_CMD_WR_LEN16)

#define S_FW_SCSI_CMD_WR_CP_EN 6
#define M_FW_SCSI_CMD_WR_CP_EN 0x3
#define V_FW_SCSI_CMD_WR_CP_EN(x) ((x) << S_FW_SCSI_CMD_WR_CP_EN)
#define G_FW_SCSI_CMD_WR_CP_EN(x) \
    (((x) >> S_FW_SCSI_CMD_WR_CP_EN) & M_FW_SCSI_CMD_WR_CP_EN)

#define S_FW_SCSI_CMD_WR_CLASS 4
#define M_FW_SCSI_CMD_WR_CLASS 0x3
#define V_FW_SCSI_CMD_WR_CLASS(x) ((x) << S_FW_SCSI_CMD_WR_CLASS)
#define G_FW_SCSI_CMD_WR_CLASS(x) \
    (((x) >> S_FW_SCSI_CMD_WR_CLASS) & M_FW_SCSI_CMD_WR_CLASS)

/* SCSI abort/close WR; t_cookie identifies the task being aborted/closed. */
struct fw_scsi_abrt_cls_wr {
	__be32 op_immdlen;
	__be32 flowid_len16;
	__be64 cookie;
	__be16 iqid;
	__u8 tmo_val;
	__u8 sub_opcode_to_chk_all_io;
	__u8 r3[4];
	__be64 t_cookie;
};

#define S_FW_SCSI_ABRT_CLS_WR_OPCODE 24
#define M_FW_SCSI_ABRT_CLS_WR_OPCODE 0xff
#define V_FW_SCSI_ABRT_CLS_WR_OPCODE(x) ((x) << S_FW_SCSI_ABRT_CLS_WR_OPCODE)
#define G_FW_SCSI_ABRT_CLS_WR_OPCODE(x) \
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_OPCODE) & M_FW_SCSI_ABRT_CLS_WR_OPCODE)

#define S_FW_SCSI_ABRT_CLS_WR_IMMDLEN 0
#define M_FW_SCSI_ABRT_CLS_WR_IMMDLEN 0xff
#define V_FW_SCSI_ABRT_CLS_WR_IMMDLEN(x) \
    ((x) << S_FW_SCSI_ABRT_CLS_WR_IMMDLEN)
#define G_FW_SCSI_ABRT_CLS_WR_IMMDLEN(x) \
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_IMMDLEN) & M_FW_SCSI_ABRT_CLS_WR_IMMDLEN)

#define S_FW_SCSI_ABRT_CLS_WR_FLOWID 8
#define M_FW_SCSI_ABRT_CLS_WR_FLOWID 0xfffff
#define V_FW_SCSI_ABRT_CLS_WR_FLOWID(x) ((x) << S_FW_SCSI_ABRT_CLS_WR_FLOWID)
#define G_FW_SCSI_ABRT_CLS_WR_FLOWID(x) \
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_FLOWID) & M_FW_SCSI_ABRT_CLS_WR_FLOWID)

#define S_FW_SCSI_ABRT_CLS_WR_LEN16 0
#define M_FW_SCSI_ABRT_CLS_WR_LEN16 0xff
#define V_FW_SCSI_ABRT_CLS_WR_LEN16(x) ((x) << S_FW_SCSI_ABRT_CLS_WR_LEN16)
#define G_FW_SCSI_ABRT_CLS_WR_LEN16(x) \
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_LEN16) & M_FW_SCSI_ABRT_CLS_WR_LEN16)

#define S_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE 2
#define M_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE 0x3f
#define V_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) \
    ((x) << S_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE)
#define G_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) \
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE) & \
     M_FW_SCSI_ABRT_CLS_WR_SUB_OPCODE)

#define S_FW_SCSI_ABRT_CLS_WR_UNSOL 1
#define M_FW_SCSI_ABRT_CLS_WR_UNSOL 0x1
#define V_FW_SCSI_ABRT_CLS_WR_UNSOL(x) ((x) << S_FW_SCSI_ABRT_CLS_WR_UNSOL)
#define G_FW_SCSI_ABRT_CLS_WR_UNSOL(x) \
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_UNSOL) & M_FW_SCSI_ABRT_CLS_WR_UNSOL)
#define F_FW_SCSI_ABRT_CLS_WR_UNSOL V_FW_SCSI_ABRT_CLS_WR_UNSOL(1U)

#define S_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO 0
#define M_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO 0x1
#define V_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) \
    ((x) << S_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO)
#define G_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) \
    (((x) >> S_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO) & \
     M_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO)
#define F_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO \
    V_FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(1U)

/* SCSI target accept WR: burst length / relative offset for the transfer. */
struct fw_scsi_tgt_acc_wr {
	__be32 op_immdlen;
	__be32 flowid_len16;
	__be64 cookie;
	__be16 iqid;
	__u8 r3;
	__u8 use_burst_len;
	union fw_scsi_tgt_acc_priv {
		struct fcoe_tgt_acc_priv {
			__u8 ctl_pri;
			__u8 cp_en_class;
			__u8 r4_lo[2];
		} fcoe;
		struct iscsi_tgt_acc_priv {
			__u8 r4[4];
		} iscsi;
	} u;
	__be32 burst_len;
	__be32 rel_off;
	__be64 r5;
	__be32 r6;
	__be32 tot_xfer_len;
};

#define S_FW_SCSI_TGT_ACC_WR_OPCODE 24
#define M_FW_SCSI_TGT_ACC_WR_OPCODE 0xff
#define V_FW_SCSI_TGT_ACC_WR_OPCODE(x) ((x) << S_FW_SCSI_TGT_ACC_WR_OPCODE)
#define G_FW_SCSI_TGT_ACC_WR_OPCODE(x) \
    (((x) >> S_FW_SCSI_TGT_ACC_WR_OPCODE) & M_FW_SCSI_TGT_ACC_WR_OPCODE)

#define S_FW_SCSI_TGT_ACC_WR_IMMDLEN 0
#define M_FW_SCSI_TGT_ACC_WR_IMMDLEN 0xff
#define V_FW_SCSI_TGT_ACC_WR_IMMDLEN(x) ((x) << S_FW_SCSI_TGT_ACC_WR_IMMDLEN)
#define G_FW_SCSI_TGT_ACC_WR_IMMDLEN(x) \
    (((x) >> S_FW_SCSI_TGT_ACC_WR_IMMDLEN) & M_FW_SCSI_TGT_ACC_WR_IMMDLEN)

#define S_FW_SCSI_TGT_ACC_WR_FLOWID 8
#define M_FW_SCSI_TGT_ACC_WR_FLOWID 0xfffff
#define V_FW_SCSI_TGT_ACC_WR_FLOWID(x) ((x) << S_FW_SCSI_TGT_ACC_WR_FLOWID)
#define G_FW_SCSI_TGT_ACC_WR_FLOWID(x) \
    (((x) >> S_FW_SCSI_TGT_ACC_WR_FLOWID) & M_FW_SCSI_TGT_ACC_WR_FLOWID)

#define S_FW_SCSI_TGT_ACC_WR_LEN16 0
#define M_FW_SCSI_TGT_ACC_WR_LEN16 0xff
#define V_FW_SCSI_TGT_ACC_WR_LEN16(x) ((x) << S_FW_SCSI_TGT_ACC_WR_LEN16)
#define G_FW_SCSI_TGT_ACC_WR_LEN16(x) \
    (((x) >> S_FW_SCSI_TGT_ACC_WR_LEN16) & M_FW_SCSI_TGT_ACC_WR_LEN16)

#define S_FW_SCSI_TGT_ACC_WR_CP_EN 6
#define M_FW_SCSI_TGT_ACC_WR_CP_EN 0x3
#define V_FW_SCSI_TGT_ACC_WR_CP_EN(x) ((x) << S_FW_SCSI_TGT_ACC_WR_CP_EN)
#define G_FW_SCSI_TGT_ACC_WR_CP_EN(x) \
    (((x) >> S_FW_SCSI_TGT_ACC_WR_CP_EN) & M_FW_SCSI_TGT_ACC_WR_CP_EN)

#define S_FW_SCSI_TGT_ACC_WR_CLASS 4
#define M_FW_SCSI_TGT_ACC_WR_CLASS 0x3
#define V_FW_SCSI_TGT_ACC_WR_CLASS(x) ((x) << S_FW_SCSI_TGT_ACC_WR_CLASS)
#define G_FW_SCSI_TGT_ACC_WR_CLASS(x) \
    (((x) >> S_FW_SCSI_TGT_ACC_WR_CLASS) & M_FW_SCSI_TGT_ACC_WR_CLASS)

/* SCSI target transmit WR. */
struct fw_scsi_tgt_xmit_wr {
	__be32 op_immdlen;
	__be32 flowid_len16;
	__be64 cookie;
	__be16 iqid;
	__u8 auto_rsp;
	__u8 use_xfer_cnt;
	union fw_scsi_tgt_xmit_priv {
		struct fcoe_tgt_xmit_priv {
			__u8 ctl_pri;
			__u8 cp_en_class;
			__u8 r3_lo[2];
		} fcoe;
		struct iscsi_tgt_xmit_priv {
			__u8 r3[4];
		} iscsi;
	} u;
	__be32 xfer_cnt;
	__be32 r4;
	__be64 r5;
	__be32 r6;
	__be32 tot_xfer_len;
};

#define S_FW_SCSI_TGT_XMIT_WR_OPCODE 24
#define M_FW_SCSI_TGT_XMIT_WR_OPCODE 0xff
#define V_FW_SCSI_TGT_XMIT_WR_OPCODE(x) ((x) << S_FW_SCSI_TGT_XMIT_WR_OPCODE)
#define G_FW_SCSI_TGT_XMIT_WR_OPCODE(x) \
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_OPCODE) & M_FW_SCSI_TGT_XMIT_WR_OPCODE)

#define S_FW_SCSI_TGT_XMIT_WR_IMMDLEN 0
#define M_FW_SCSI_TGT_XMIT_WR_IMMDLEN 0xff
#define V_FW_SCSI_TGT_XMIT_WR_IMMDLEN(x) \
    ((x) << S_FW_SCSI_TGT_XMIT_WR_IMMDLEN)
#define G_FW_SCSI_TGT_XMIT_WR_IMMDLEN(x) \
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_IMMDLEN) & M_FW_SCSI_TGT_XMIT_WR_IMMDLEN)

#define S_FW_SCSI_TGT_XMIT_WR_FLOWID 8
#define M_FW_SCSI_TGT_XMIT_WR_FLOWID 0xfffff
#define V_FW_SCSI_TGT_XMIT_WR_FLOWID(x) ((x) << S_FW_SCSI_TGT_XMIT_WR_FLOWID)
#define G_FW_SCSI_TGT_XMIT_WR_FLOWID(x) \
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_FLOWID) & M_FW_SCSI_TGT_XMIT_WR_FLOWID)

#define S_FW_SCSI_TGT_XMIT_WR_LEN16 0
#define M_FW_SCSI_TGT_XMIT_WR_LEN16 0xff
#define V_FW_SCSI_TGT_XMIT_WR_LEN16(x) ((x) << S_FW_SCSI_TGT_XMIT_WR_LEN16)
#define G_FW_SCSI_TGT_XMIT_WR_LEN16(x) \
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_LEN16) & M_FW_SCSI_TGT_XMIT_WR_LEN16)

#define S_FW_SCSI_TGT_XMIT_WR_CP_EN 6
#define M_FW_SCSI_TGT_XMIT_WR_CP_EN 0x3
#define V_FW_SCSI_TGT_XMIT_WR_CP_EN(x) ((x) << S_FW_SCSI_TGT_XMIT_WR_CP_EN)
#define G_FW_SCSI_TGT_XMIT_WR_CP_EN(x) \
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_CP_EN) & M_FW_SCSI_TGT_XMIT_WR_CP_EN)

#define S_FW_SCSI_TGT_XMIT_WR_CLASS 4
#define M_FW_SCSI_TGT_XMIT_WR_CLASS 0x3
#define V_FW_SCSI_TGT_XMIT_WR_CLASS(x) ((x) << S_FW_SCSI_TGT_XMIT_WR_CLASS)
#define G_FW_SCSI_TGT_XMIT_WR_CLASS(x) \
    (((x) >> S_FW_SCSI_TGT_XMIT_WR_CLASS) & M_FW_SCSI_TGT_XMIT_WR_CLASS)

/* SCSI target response WR. */
struct fw_scsi_tgt_rsp_wr {
	__be32 op_immdlen;
	__be32 flowid_len16;
	__be64 cookie;
	__be16 iqid;
	__u8 r3[2];
	union fw_scsi_tgt_rsp_priv {
		struct fcoe_tgt_rsp_priv {
			__u8 ctl_pri;
			__u8 cp_en_class;
			__u8 r4_lo[2];
		} fcoe;
		struct iscsi_tgt_rsp_priv {
			__u8 r4[4];
		} iscsi;
	} u;
	__u8 r5[8];
};

#define S_FW_SCSI_TGT_RSP_WR_OPCODE 24
#define M_FW_SCSI_TGT_RSP_WR_OPCODE 0xff
#define V_FW_SCSI_TGT_RSP_WR_OPCODE(x) ((x) << S_FW_SCSI_TGT_RSP_WR_OPCODE)
#define G_FW_SCSI_TGT_RSP_WR_OPCODE(x) \
    (((x) >> S_FW_SCSI_TGT_RSP_WR_OPCODE) & M_FW_SCSI_TGT_RSP_WR_OPCODE)

#define S_FW_SCSI_TGT_RSP_WR_IMMDLEN 0
#define M_FW_SCSI_TGT_RSP_WR_IMMDLEN 0xff
#define V_FW_SCSI_TGT_RSP_WR_IMMDLEN(x) ((x) << S_FW_SCSI_TGT_RSP_WR_IMMDLEN)
#define G_FW_SCSI_TGT_RSP_WR_IMMDLEN(x) \
    (((x) >> S_FW_SCSI_TGT_RSP_WR_IMMDLEN) & M_FW_SCSI_TGT_RSP_WR_IMMDLEN)

#define S_FW_SCSI_TGT_RSP_WR_FLOWID 8
#define M_FW_SCSI_TGT_RSP_WR_FLOWID 0xfffff
#define V_FW_SCSI_TGT_RSP_WR_FLOWID(x) ((x) << S_FW_SCSI_TGT_RSP_WR_FLOWID)
#define G_FW_SCSI_TGT_RSP_WR_FLOWID(x) \
    (((x) >> S_FW_SCSI_TGT_RSP_WR_FLOWID) & M_FW_SCSI_TGT_RSP_WR_FLOWID)

#define S_FW_SCSI_TGT_RSP_WR_LEN16 0
#define M_FW_SCSI_TGT_RSP_WR_LEN16 0xff
#define V_FW_SCSI_TGT_RSP_WR_LEN16(x) ((x) << S_FW_SCSI_TGT_RSP_WR_LEN16)
#define G_FW_SCSI_TGT_RSP_WR_LEN16(x) \
    (((x) >> S_FW_SCSI_TGT_RSP_WR_LEN16) & M_FW_SCSI_TGT_RSP_WR_LEN16)

#define S_FW_SCSI_TGT_RSP_WR_CP_EN 6
#define M_FW_SCSI_TGT_RSP_WR_CP_EN 0x3
#define V_FW_SCSI_TGT_RSP_WR_CP_EN(x) ((x) << S_FW_SCSI_TGT_RSP_WR_CP_EN)
#define G_FW_SCSI_TGT_RSP_WR_CP_EN(x) \
    (((x) >> S_FW_SCSI_TGT_RSP_WR_CP_EN) & M_FW_SCSI_TGT_RSP_WR_CP_EN)

#define S_FW_SCSI_TGT_RSP_WR_CLASS 4
#define M_FW_SCSI_TGT_RSP_WR_CLASS 0x3
#define V_FW_SCSI_TGT_RSP_WR_CLASS(x) ((x) << S_FW_SCSI_TGT_RSP_WR_CLASS)
#define G_FW_SCSI_TGT_RSP_WR_CLASS(x) \
    (((x) >> S_FW_SCSI_TGT_RSP_WR_CLASS) & M_FW_SCSI_TGT_RSP_WR_CLASS)

/* Partial-offload FCoE TCB WR: FC exchange/VLAN ids plus FC S_ID/D_ID. */
struct fw_pofcoe_tcb_wr {
	__be32 op_compl;
	__be32 equiq_to_len16;
	__be32 r4;
	__be32 xfer_len;
	__be32 tid_to_port;
	__be16 x_id;
	__be16 vlan_id;
	__be64 cookie;
	__be32 s_id;
	__be32 d_id;
	__be32 tag;
	__be16 r6;
	__be16 iqid;
};

#define S_FW_POFCOE_TCB_WR_TID 12
#define M_FW_POFCOE_TCB_WR_TID 0xfffff
#define V_FW_POFCOE_TCB_WR_TID(x) ((x) << S_FW_POFCOE_TCB_WR_TID)
#define G_FW_POFCOE_TCB_WR_TID(x) \
    (((x) >> S_FW_POFCOE_TCB_WR_TID) & M_FW_POFCOE_TCB_WR_TID)

#define S_FW_POFCOE_TCB_WR_ALLOC 4
#define M_FW_POFCOE_TCB_WR_ALLOC 0x1
#define V_FW_POFCOE_TCB_WR_ALLOC(x) ((x) << S_FW_POFCOE_TCB_WR_ALLOC)
#define G_FW_POFCOE_TCB_WR_ALLOC(x) \
    (((x) >> S_FW_POFCOE_TCB_WR_ALLOC) & M_FW_POFCOE_TCB_WR_ALLOC)
#define F_FW_POFCOE_TCB_WR_ALLOC V_FW_POFCOE_TCB_WR_ALLOC(1U)

#define S_FW_POFCOE_TCB_WR_FREE 3
#define M_FW_POFCOE_TCB_WR_FREE 0x1
#define V_FW_POFCOE_TCB_WR_FREE(x) ((x) << S_FW_POFCOE_TCB_WR_FREE)
#define G_FW_POFCOE_TCB_WR_FREE(x) \
    (((x) >> S_FW_POFCOE_TCB_WR_FREE) & M_FW_POFCOE_TCB_WR_FREE)
#define F_FW_POFCOE_TCB_WR_FREE V_FW_POFCOE_TCB_WR_FREE(1U)

#define S_FW_POFCOE_TCB_WR_PORT 0
#define M_FW_POFCOE_TCB_WR_PORT 0x7
#define V_FW_POFCOE_TCB_WR_PORT(x) ((x) << S_FW_POFCOE_TCB_WR_PORT)
#define G_FW_POFCOE_TCB_WR_PORT(x) \
    (((x) >> S_FW_POFCOE_TCB_WR_PORT) & M_FW_POFCOE_TCB_WR_PORT)

/* Partial-offload FCoE ULP-TX WR header. */
struct fw_pofcoe_ulptx_wr {
	__be32 op_pkd;
	__be32 equiq_to_len16;
	__u64 cookie;
};

/*******************************************************************
 *  T10 DIF related definition
 *******************************************************************/

/* T10-DIF protection-information header prepended to TX payloads. */
struct fw_tx_pi_header {
	__be16 op_to_inline;
	__u8 pi_interval_tag_type;
	__u8 num_pi;
	__be32 pi_start4_pi_end4;
	__u8 tag_gen_enabled_pkd;
	__u8 num_pi_dsg;
	__be16 app_tag;
	__be32 ref_tag;
};

#define S_FW_TX_PI_HEADER_OP 8
#define M_FW_TX_PI_HEADER_OP 0xff
#define V_FW_TX_PI_HEADER_OP(x) ((x) << S_FW_TX_PI_HEADER_OP)
#define G_FW_TX_PI_HEADER_OP(x) \
    (((x) >> S_FW_TX_PI_HEADER_OP) & M_FW_TX_PI_HEADER_OP)

#define S_FW_TX_PI_HEADER_ULPTXMORE 7
#define M_FW_TX_PI_HEADER_ULPTXMORE 0x1
#define V_FW_TX_PI_HEADER_ULPTXMORE(x) ((x) << S_FW_TX_PI_HEADER_ULPTXMORE)
#define G_FW_TX_PI_HEADER_ULPTXMORE(x) \
    (((x) >> S_FW_TX_PI_HEADER_ULPTXMORE) & M_FW_TX_PI_HEADER_ULPTXMORE)
#define F_FW_TX_PI_HEADER_ULPTXMORE V_FW_TX_PI_HEADER_ULPTXMORE(1U)

#define S_FW_TX_PI_HEADER_PI_CONTROL 4
#define M_FW_TX_PI_HEADER_PI_CONTROL 0x7
#define V_FW_TX_PI_HEADER_PI_CONTROL(x) ((x) << S_FW_TX_PI_HEADER_PI_CONTROL)
#define G_FW_TX_PI_HEADER_PI_CONTROL(x) \
    (((x) >> S_FW_TX_PI_HEADER_PI_CONTROL) & M_FW_TX_PI_HEADER_PI_CONTROL)

#define S_FW_TX_PI_HEADER_GUARD_TYPE 2
#define M_FW_TX_PI_HEADER_GUARD_TYPE 0x1
#define V_FW_TX_PI_HEADER_GUARD_TYPE(x) ((x) << S_FW_TX_PI_HEADER_GUARD_TYPE)
#define G_FW_TX_PI_HEADER_GUARD_TYPE(x) \
    (((x) >> S_FW_TX_PI_HEADER_GUARD_TYPE) & M_FW_TX_PI_HEADER_GUARD_TYPE)
#define F_FW_TX_PI_HEADER_GUARD_TYPE V_FW_TX_PI_HEADER_GUARD_TYPE(1U)

#define S_FW_TX_PI_HEADER_VALIDATE 1
#define M_FW_TX_PI_HEADER_VALIDATE 0x1
#define V_FW_TX_PI_HEADER_VALIDATE(x) ((x) << S_FW_TX_PI_HEADER_VALIDATE)
#define G_FW_TX_PI_HEADER_VALIDATE(x) \
    (((x) >> S_FW_TX_PI_HEADER_VALIDATE) & M_FW_TX_PI_HEADER_VALIDATE)
#define F_FW_TX_PI_HEADER_VALIDATE V_FW_TX_PI_HEADER_VALIDATE(1U)

#define S_FW_TX_PI_HEADER_INLINE 0
#define M_FW_TX_PI_HEADER_INLINE 0x1
#define V_FW_TX_PI_HEADER_INLINE(x) ((x) << S_FW_TX_PI_HEADER_INLINE)
#define G_FW_TX_PI_HEADER_INLINE(x) \
    (((x) >> S_FW_TX_PI_HEADER_INLINE) & M_FW_TX_PI_HEADER_INLINE)
#define F_FW_TX_PI_HEADER_INLINE V_FW_TX_PI_HEADER_INLINE(1U)

#define S_FW_TX_PI_HEADER_PI_INTERVAL 7
#define M_FW_TX_PI_HEADER_PI_INTERVAL 0x1
#define V_FW_TX_PI_HEADER_PI_INTERVAL(x) \
    ((x) << S_FW_TX_PI_HEADER_PI_INTERVAL)
#define G_FW_TX_PI_HEADER_PI_INTERVAL(x) \
    (((x) >> S_FW_TX_PI_HEADER_PI_INTERVAL) & M_FW_TX_PI_HEADER_PI_INTERVAL)
#define F_FW_TX_PI_HEADER_PI_INTERVAL V_FW_TX_PI_HEADER_PI_INTERVAL(1U)

#define S_FW_TX_PI_HEADER_TAG_TYPE 5
#define M_FW_TX_PI_HEADER_TAG_TYPE 0x3
#define V_FW_TX_PI_HEADER_TAG_TYPE(x) ((x) << S_FW_TX_PI_HEADER_TAG_TYPE)
#define G_FW_TX_PI_HEADER_TAG_TYPE(x) \
    (((x) >> S_FW_TX_PI_HEADER_TAG_TYPE) & M_FW_TX_PI_HEADER_TAG_TYPE)

#define S_FW_TX_PI_HEADER_PI_START4 22
#define M_FW_TX_PI_HEADER_PI_START4 0x3ff
#define V_FW_TX_PI_HEADER_PI_START4(x) ((x) << S_FW_TX_PI_HEADER_PI_START4)
#define G_FW_TX_PI_HEADER_PI_START4(x) \
    (((x) >> S_FW_TX_PI_HEADER_PI_START4) & M_FW_TX_PI_HEADER_PI_START4)

#define S_FW_TX_PI_HEADER_PI_END4 0
#define M_FW_TX_PI_HEADER_PI_END4 0x3fffff
#define V_FW_TX_PI_HEADER_PI_END4(x) ((x) << S_FW_TX_PI_HEADER_PI_END4)
#define G_FW_TX_PI_HEADER_PI_END4(x) \
    (((x) >> S_FW_TX_PI_HEADER_PI_END4) & M_FW_TX_PI_HEADER_PI_END4)

#define S_FW_TX_PI_HEADER_TAG_GEN_ENABLED 6
#define M_FW_TX_PI_HEADER_TAG_GEN_ENABLED 0x3
#define V_FW_TX_PI_HEADER_TAG_GEN_ENABLED(x) \
    ((x) << S_FW_TX_PI_HEADER_TAG_GEN_ENABLED)
#define G_FW_TX_PI_HEADER_TAG_GEN_ENABLED(x) \
    (((x) >> S_FW_TX_PI_HEADER_TAG_GEN_ENABLED) & \
     M_FW_TX_PI_HEADER_TAG_GEN_ENABLED)

enum fw_pi_error_type {
	FW_PI_ERROR_GUARD_CHECK_FAILED = 0,
};

/* T10-DIF protection-information error report from the firmware. */
struct fw_pi_error {
	__be32 err_type_pkd;
	__be32 flowid_len16;
	__be16 r2;
	__be16 app_tag;
	__be32 ref_tag;
	__be32 pisc[4];
};

#define S_FW_PI_ERROR_ERR_TYPE 24
#define M_FW_PI_ERROR_ERR_TYPE 0xff
#define V_FW_PI_ERROR_ERR_TYPE(x) ((x) << S_FW_PI_ERROR_ERR_TYPE)
#define G_FW_PI_ERROR_ERR_TYPE(x) \
    (((x) >> S_FW_PI_ERROR_ERR_TYPE) & M_FW_PI_ERROR_ERR_TYPE)

/* TLS transmit data WR. */
struct fw_tlstx_data_wr {
	__be32 op_to_immdlen;
	__be32 flowid_len16;
	__be32 plen;
	__be32 lsodisable_to_flags;
	__be32 r5;
	__be32 ctxloc_to_exp;
	__be16 mfs;
	__be16 adjustedplen_pkd;
	__be16 expinplenmax_pkd;
	__u8 pdusinplenmax_pkd;
	__u8 r10;
};

#define S_FW_TLSTX_DATA_WR_OPCODE 24
#define M_FW_TLSTX_DATA_WR_OPCODE 0xff
#define V_FW_TLSTX_DATA_WR_OPCODE(x) ((x) << S_FW_TLSTX_DATA_WR_OPCODE)
#define G_FW_TLSTX_DATA_WR_OPCODE(x) \
    (((x) >> S_FW_TLSTX_DATA_WR_OPCODE) & M_FW_TLSTX_DATA_WR_OPCODE)

#define S_FW_TLSTX_DATA_WR_COMPL 21
#define M_FW_TLSTX_DATA_WR_COMPL 0x1
#define V_FW_TLSTX_DATA_WR_COMPL(x) ((x) << S_FW_TLSTX_DATA_WR_COMPL)
#define G_FW_TLSTX_DATA_WR_COMPL(x) \
    (((x) >> S_FW_TLSTX_DATA_WR_COMPL) & M_FW_TLSTX_DATA_WR_COMPL)
#define F_FW_TLSTX_DATA_WR_COMPL V_FW_TLSTX_DATA_WR_COMPL(1U)

#define S_FW_TLSTX_DATA_WR_IMMDLEN 0
#define M_FW_TLSTX_DATA_WR_IMMDLEN 0xff
#define V_FW_TLSTX_DATA_WR_IMMDLEN(x) ((x) << S_FW_TLSTX_DATA_WR_IMMDLEN)
#define G_FW_TLSTX_DATA_WR_IMMDLEN(x) \
    (((x) >> S_FW_TLSTX_DATA_WR_IMMDLEN) & M_FW_TLSTX_DATA_WR_IMMDLEN)

#define S_FW_TLSTX_DATA_WR_FLOWID 8
#define M_FW_TLSTX_DATA_WR_FLOWID 0xfffff
#define V_FW_TLSTX_DATA_WR_FLOWID(x) ((x) << S_FW_TLSTX_DATA_WR_FLOWID)
#define G_FW_TLSTX_DATA_WR_FLOWID(x) \
    (((x) >> S_FW_TLSTX_DATA_WR_FLOWID) & M_FW_TLSTX_DATA_WR_FLOWID)

#define S_FW_TLSTX_DATA_WR_LEN16 0
#define M_FW_TLSTX_DATA_WR_LEN16 0xff
#define V_FW_TLSTX_DATA_WR_LEN16(x) ((x) << S_FW_TLSTX_DATA_WR_LEN16)
#define G_FW_TLSTX_DATA_WR_LEN16(x) \
    (((x) >> S_FW_TLSTX_DATA_WR_LEN16) & M_FW_TLSTX_DATA_WR_LEN16)

#define S_FW_TLSTX_DATA_WR_LSODISABLE 31
#define M_FW_TLSTX_DATA_WR_LSODISABLE 0x1
#define V_FW_TLSTX_DATA_WR_LSODISABLE(x) \
    ((x) << S_FW_TLSTX_DATA_WR_LSODISABLE)
#define G_FW_TLSTX_DATA_WR_LSODISABLE(x) \
    (((x) >> S_FW_TLSTX_DATA_WR_LSODISABLE) & M_FW_TLSTX_DATA_WR_LSODISABLE)
#define F_FW_TLSTX_DATA_WR_LSODISABLE V_FW_TLSTX_DATA_WR_LSODISABLE(1U)
#define S_FW_TLSTX_DATA_WR_ALIGNPLD 30 #define M_FW_TLSTX_DATA_WR_ALIGNPLD 0x1 #define V_FW_TLSTX_DATA_WR_ALIGNPLD(x) ((x) << S_FW_TLSTX_DATA_WR_ALIGNPLD) #define G_FW_TLSTX_DATA_WR_ALIGNPLD(x) \ (((x) >> S_FW_TLSTX_DATA_WR_ALIGNPLD) & M_FW_TLSTX_DATA_WR_ALIGNPLD) #define F_FW_TLSTX_DATA_WR_ALIGNPLD V_FW_TLSTX_DATA_WR_ALIGNPLD(1U) #define S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE 29 #define M_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE 0x1 #define V_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(x) \ ((x) << S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE) #define G_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(x) \ (((x) >> S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE) & \ M_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE) #define F_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE V_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(1U) #define S_FW_TLSTX_DATA_WR_FLAGS 0 #define M_FW_TLSTX_DATA_WR_FLAGS 0xfffffff #define V_FW_TLSTX_DATA_WR_FLAGS(x) ((x) << S_FW_TLSTX_DATA_WR_FLAGS) #define G_FW_TLSTX_DATA_WR_FLAGS(x) \ (((x) >> S_FW_TLSTX_DATA_WR_FLAGS) & M_FW_TLSTX_DATA_WR_FLAGS) #define S_FW_TLSTX_DATA_WR_CTXLOC 30 #define M_FW_TLSTX_DATA_WR_CTXLOC 0x3 #define V_FW_TLSTX_DATA_WR_CTXLOC(x) ((x) << S_FW_TLSTX_DATA_WR_CTXLOC) #define G_FW_TLSTX_DATA_WR_CTXLOC(x) \ (((x) >> S_FW_TLSTX_DATA_WR_CTXLOC) & M_FW_TLSTX_DATA_WR_CTXLOC) #define S_FW_TLSTX_DATA_WR_IVDSGL 29 #define M_FW_TLSTX_DATA_WR_IVDSGL 0x1 #define V_FW_TLSTX_DATA_WR_IVDSGL(x) ((x) << S_FW_TLSTX_DATA_WR_IVDSGL) #define G_FW_TLSTX_DATA_WR_IVDSGL(x) \ (((x) >> S_FW_TLSTX_DATA_WR_IVDSGL) & M_FW_TLSTX_DATA_WR_IVDSGL) #define F_FW_TLSTX_DATA_WR_IVDSGL V_FW_TLSTX_DATA_WR_IVDSGL(1U) #define S_FW_TLSTX_DATA_WR_KEYSIZE 24 #define M_FW_TLSTX_DATA_WR_KEYSIZE 0x1f #define V_FW_TLSTX_DATA_WR_KEYSIZE(x) ((x) << S_FW_TLSTX_DATA_WR_KEYSIZE) #define G_FW_TLSTX_DATA_WR_KEYSIZE(x) \ (((x) >> S_FW_TLSTX_DATA_WR_KEYSIZE) & M_FW_TLSTX_DATA_WR_KEYSIZE) #define S_FW_TLSTX_DATA_WR_NUMIVS 14 #define M_FW_TLSTX_DATA_WR_NUMIVS 0xff #define V_FW_TLSTX_DATA_WR_NUMIVS(x) ((x) << S_FW_TLSTX_DATA_WR_NUMIVS) #define G_FW_TLSTX_DATA_WR_NUMIVS(x) \ (((x) >> 
S_FW_TLSTX_DATA_WR_NUMIVS) & M_FW_TLSTX_DATA_WR_NUMIVS) #define S_FW_TLSTX_DATA_WR_EXP 0 #define M_FW_TLSTX_DATA_WR_EXP 0x3fff #define V_FW_TLSTX_DATA_WR_EXP(x) ((x) << S_FW_TLSTX_DATA_WR_EXP) #define G_FW_TLSTX_DATA_WR_EXP(x) \ (((x) >> S_FW_TLSTX_DATA_WR_EXP) & M_FW_TLSTX_DATA_WR_EXP) #define S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN 1 #define M_FW_TLSTX_DATA_WR_ADJUSTEDPLEN 0x7fff #define V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(x) \ ((x) << S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN) #define G_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(x) \ (((x) >> S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN) & \ M_FW_TLSTX_DATA_WR_ADJUSTEDPLEN) #define S_FW_TLSTX_DATA_WR_EXPINPLENMAX 4 #define M_FW_TLSTX_DATA_WR_EXPINPLENMAX 0xfff #define V_FW_TLSTX_DATA_WR_EXPINPLENMAX(x) \ ((x) << S_FW_TLSTX_DATA_WR_EXPINPLENMAX) #define G_FW_TLSTX_DATA_WR_EXPINPLENMAX(x) \ (((x) >> S_FW_TLSTX_DATA_WR_EXPINPLENMAX) & \ M_FW_TLSTX_DATA_WR_EXPINPLENMAX) #define S_FW_TLSTX_DATA_WR_PDUSINPLENMAX 2 #define M_FW_TLSTX_DATA_WR_PDUSINPLENMAX 0x3f #define V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(x) \ ((x) << S_FW_TLSTX_DATA_WR_PDUSINPLENMAX) #define G_FW_TLSTX_DATA_WR_PDUSINPLENMAX(x) \ (((x) >> S_FW_TLSTX_DATA_WR_PDUSINPLENMAX) & \ M_FW_TLSTX_DATA_WR_PDUSINPLENMAX) struct fw_crypto_lookaside_wr { __be32 op_to_cctx_size; __be32 len16_pkd; __be32 session_id; __be32 rx_chid_to_rx_q_id; __be32 key_addr; __be32 pld_size_hash_size; __be64 cookie; }; #define S_FW_CRYPTO_LOOKASIDE_WR_OPCODE 24 #define M_FW_CRYPTO_LOOKASIDE_WR_OPCODE 0xff #define V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_OPCODE) #define G_FW_CRYPTO_LOOKASIDE_WR_OPCODE(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_OPCODE) & \ M_FW_CRYPTO_LOOKASIDE_WR_OPCODE) #define S_FW_CRYPTO_LOOKASIDE_WR_COMPL 23 #define M_FW_CRYPTO_LOOKASIDE_WR_COMPL 0x1 #define V_FW_CRYPTO_LOOKASIDE_WR_COMPL(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_COMPL) #define G_FW_CRYPTO_LOOKASIDE_WR_COMPL(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_COMPL) & \ M_FW_CRYPTO_LOOKASIDE_WR_COMPL) #define F_FW_CRYPTO_LOOKASIDE_WR_COMPL 
V_FW_CRYPTO_LOOKASIDE_WR_COMPL(1U) #define S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN 15 #define M_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN 0xff #define V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN) #define G_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN) & \ M_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN) #define S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC 5 #define M_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC) #define G_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC) & \ M_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC) #define S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE 0 #define M_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE 0x1f #define V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE) #define G_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE) & \ M_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE) #define S_FW_CRYPTO_LOOKASIDE_WR_LEN16 0 #define M_FW_CRYPTO_LOOKASIDE_WR_LEN16 0xff #define V_FW_CRYPTO_LOOKASIDE_WR_LEN16(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_LEN16) #define G_FW_CRYPTO_LOOKASIDE_WR_LEN16(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_LEN16) & \ M_FW_CRYPTO_LOOKASIDE_WR_LEN16) #define S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID 29 #define M_FW_CRYPTO_LOOKASIDE_WR_RX_CHID 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID) #define G_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID) & \ M_FW_CRYPTO_LOOKASIDE_WR_RX_CHID) #define S_FW_CRYPTO_LOOKASIDE_WR_LCB 27 #define M_FW_CRYPTO_LOOKASIDE_WR_LCB 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_LCB(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_LCB) #define G_FW_CRYPTO_LOOKASIDE_WR_LCB(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_LCB) & M_FW_CRYPTO_LOOKASIDE_WR_LCB) #define S_FW_CRYPTO_LOOKASIDE_WR_PHASH 25 #define M_FW_CRYPTO_LOOKASIDE_WR_PHASH 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_PHASH(x) \ ((x) << 
S_FW_CRYPTO_LOOKASIDE_WR_PHASH) #define G_FW_CRYPTO_LOOKASIDE_WR_PHASH(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_PHASH) & \ M_FW_CRYPTO_LOOKASIDE_WR_PHASH) #define S_FW_CRYPTO_LOOKASIDE_WR_IV 23 #define M_FW_CRYPTO_LOOKASIDE_WR_IV 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_IV(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_IV) #define G_FW_CRYPTO_LOOKASIDE_WR_IV(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_IV) & M_FW_CRYPTO_LOOKASIDE_WR_IV) #define S_FW_CRYPTO_LOOKASIDE_WR_FQIDX 15 #define M_FW_CRYPTO_LOOKASIDE_WR_FQIDX 0xff #define V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_FQIDX) #define G_FW_CRYPTO_LOOKASIDE_WR_FQIDX(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_FQIDX) &\ M_FW_CRYPTO_LOOKASIDE_WR_FQIDX) #define S_FW_CRYPTO_LOOKASIDE_WR_TX_CH 10 #define M_FW_CRYPTO_LOOKASIDE_WR_TX_CH 0x3 #define V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_TX_CH) #define G_FW_CRYPTO_LOOKASIDE_WR_TX_CH(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_TX_CH) & \ M_FW_CRYPTO_LOOKASIDE_WR_TX_CH) #define S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID 0 #define M_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID 0x3ff #define V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID) #define G_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID) & \ M_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID) #define S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE 24 #define M_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE 0xff #define V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE) #define G_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE) & \ M_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE) #define S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE 17 #define M_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE 0x7f #define V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(x) \ ((x) << S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE) #define G_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(x) \ (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE) & \ M_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE) struct fw_tls_tunnel_ofld_wr { __be32 op_compl; __be32 
flowid_len16; __be32 plen; __be32 r4; }; /****************************************************************************** * C O M M A N D s *********************/ /* * The maximum length of time, in milliseconds, that we expect any firmware * command to take to execute and return a reply to the host. The RESET * and INITIALIZE commands can take a fair amount of time to execute but * most execute in far less time than this maximum. This constant is used * by host software to determine how long to wait for a firmware command * reply before declaring the firmware as dead/unreachable ... */ #define FW_CMD_MAX_TIMEOUT 10000 /* * If a host driver does a HELLO and discovers that there's already a MASTER * selected, we may have to wait for that MASTER to finish issuing RESET, * configuration and INITIALIZE commands. Also, there's a possibility that * our own HELLO may get lost if it happens right as the MASTER is issuing a * RESET command, so we need to be willing to make a few retries of our HELLO. 
*/ #define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT) #define FW_CMD_HELLO_RETRIES 3 enum fw_cmd_opcodes { FW_LDST_CMD = 0x01, FW_RESET_CMD = 0x03, FW_HELLO_CMD = 0x04, FW_BYE_CMD = 0x05, FW_INITIALIZE_CMD = 0x06, FW_CAPS_CONFIG_CMD = 0x07, FW_PARAMS_CMD = 0x08, FW_PFVF_CMD = 0x09, FW_IQ_CMD = 0x10, FW_EQ_MNGT_CMD = 0x11, FW_EQ_ETH_CMD = 0x12, FW_EQ_CTRL_CMD = 0x13, FW_EQ_OFLD_CMD = 0x21, FW_VI_CMD = 0x14, FW_VI_MAC_CMD = 0x15, FW_VI_RXMODE_CMD = 0x16, FW_VI_ENABLE_CMD = 0x17, FW_VI_STATS_CMD = 0x1a, FW_ACL_MAC_CMD = 0x18, FW_ACL_VLAN_CMD = 0x19, FW_PORT_CMD = 0x1b, FW_PORT_STATS_CMD = 0x1c, FW_PORT_LB_STATS_CMD = 0x1d, FW_PORT_TRACE_CMD = 0x1e, FW_PORT_TRACE_MMAP_CMD = 0x1f, FW_RSS_IND_TBL_CMD = 0x20, FW_RSS_GLB_CONFIG_CMD = 0x22, FW_RSS_VI_CONFIG_CMD = 0x23, FW_SCHED_CMD = 0x24, FW_DEVLOG_CMD = 0x25, FW_WATCHDOG_CMD = 0x27, FW_CLIP_CMD = 0x28, FW_CLIP2_CMD = 0x29, FW_CHNET_IFACE_CMD = 0x26, FW_FCOE_RES_INFO_CMD = 0x31, FW_FCOE_LINK_CMD = 0x32, FW_FCOE_VNP_CMD = 0x33, FW_FCOE_SPARAMS_CMD = 0x35, FW_FCOE_STATS_CMD = 0x37, FW_FCOE_FCF_CMD = 0x38, FW_DCB_IEEE_CMD = 0x3a, FW_DIAG_CMD = 0x3d, FW_PTP_CMD = 0x3e, FW_HMA_CMD = 0x3f, FW_LASTC2E_CMD = 0x40, FW_ERROR_CMD = 0x80, FW_DEBUG_CMD = 0x81, }; enum fw_cmd_cap { FW_CMD_CAP_PF = 0x01, FW_CMD_CAP_DMAQ = 0x02, FW_CMD_CAP_PORT = 0x04, FW_CMD_CAP_PORTPROMISC = 0x08, FW_CMD_CAP_PORTSTATS = 0x10, FW_CMD_CAP_VF = 0x80, }; /* * Generic command header flit0 */ struct fw_cmd_hdr { __be32 hi; __be32 lo; }; #define S_FW_CMD_OP 24 #define M_FW_CMD_OP 0xff #define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP) #define G_FW_CMD_OP(x) (((x) >> S_FW_CMD_OP) & M_FW_CMD_OP) #define S_FW_CMD_REQUEST 23 #define M_FW_CMD_REQUEST 0x1 #define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST) #define G_FW_CMD_REQUEST(x) (((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST) #define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U) #define S_FW_CMD_READ 22 #define M_FW_CMD_READ 0x1 #define V_FW_CMD_READ(x) ((x) << S_FW_CMD_READ) #define G_FW_CMD_READ(x) (((x) >> 
S_FW_CMD_READ) & M_FW_CMD_READ) #define F_FW_CMD_READ V_FW_CMD_READ(1U) #define S_FW_CMD_WRITE 21 #define M_FW_CMD_WRITE 0x1 #define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE) #define G_FW_CMD_WRITE(x) (((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE) #define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U) #define S_FW_CMD_EXEC 20 #define M_FW_CMD_EXEC 0x1 #define V_FW_CMD_EXEC(x) ((x) << S_FW_CMD_EXEC) #define G_FW_CMD_EXEC(x) (((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC) #define F_FW_CMD_EXEC V_FW_CMD_EXEC(1U) #define S_FW_CMD_RAMASK 20 #define M_FW_CMD_RAMASK 0xf #define V_FW_CMD_RAMASK(x) ((x) << S_FW_CMD_RAMASK) #define G_FW_CMD_RAMASK(x) (((x) >> S_FW_CMD_RAMASK) & M_FW_CMD_RAMASK) #define S_FW_CMD_RETVAL 8 #define M_FW_CMD_RETVAL 0xff #define V_FW_CMD_RETVAL(x) ((x) << S_FW_CMD_RETVAL) #define G_FW_CMD_RETVAL(x) (((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL) #define S_FW_CMD_LEN16 0 #define M_FW_CMD_LEN16 0xff #define V_FW_CMD_LEN16(x) ((x) << S_FW_CMD_LEN16) #define G_FW_CMD_LEN16(x) (((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16) #define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16) /* * address spaces */ enum fw_ldst_addrspc { FW_LDST_ADDRSPC_FIRMWARE = 0x0001, FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, FW_LDST_ADDRSPC_SGE_INGC = 0x0009, FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, FW_LDST_ADDRSPC_TP_PIO = 0x0010, FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, FW_LDST_ADDRSPC_TP_MIB = 0x0012, FW_LDST_ADDRSPC_MDIO = 0x0018, FW_LDST_ADDRSPC_MPS = 0x0020, FW_LDST_ADDRSPC_FUNC = 0x0028, FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029, FW_LDST_ADDRSPC_FUNC_I2C = 0x002A, /* legacy */ FW_LDST_ADDRSPC_LE = 0x0030, FW_LDST_ADDRSPC_I2C = 0x0038, FW_LDST_ADDRSPC_PCIE_CFGS = 0x0040, FW_LDST_ADDRSPC_PCIE_DBG = 0x0041, FW_LDST_ADDRSPC_PCIE_PHY = 0x0042, FW_LDST_ADDRSPC_CIM_Q = 0x0048, }; /* * MDIO VSC8634 register access control field */ enum fw_ldst_mdio_vsc8634_aid { FW_LDST_MDIO_VS_STANDARD, FW_LDST_MDIO_VS_EXTENDED, FW_LDST_MDIO_VS_GPIO }; enum fw_ldst_mps_fid { FW_LDST_MPS_ATRB, 
FW_LDST_MPS_RPLC }; enum fw_ldst_func_access_ctl { FW_LDST_FUNC_ACC_CTL_VIID, FW_LDST_FUNC_ACC_CTL_FID }; enum fw_ldst_func_mod_index { FW_LDST_FUNC_MPS }; struct fw_ldst_cmd { __be32 op_to_addrspace; __be32 cycles_to_len16; union fw_ldst { struct fw_ldst_addrval { __be32 addr; __be32 val; } addrval; struct fw_ldst_idctxt { __be32 physid; __be32 msg_ctxtflush; __be32 ctxt_data7; __be32 ctxt_data6; __be32 ctxt_data5; __be32 ctxt_data4; __be32 ctxt_data3; __be32 ctxt_data2; __be32 ctxt_data1; __be32 ctxt_data0; } idctxt; struct fw_ldst_mdio { __be16 paddr_mmd; __be16 raddr; __be16 vctl; __be16 rval; } mdio; struct fw_ldst_cim_rq { __u8 req_first64[8]; __u8 req_second64[8]; __u8 resp_first64[8]; __u8 resp_second64[8]; __be32 r3[2]; } cim_rq; union fw_ldst_mps { struct fw_ldst_mps_rplc { __be16 fid_idx; __be16 rplcpf_pkd; __be32 rplc255_224; __be32 rplc223_192; __be32 rplc191_160; __be32 rplc159_128; __be32 rplc127_96; __be32 rplc95_64; __be32 rplc63_32; __be32 rplc31_0; } rplc; struct fw_ldst_mps_atrb { __be16 fid_mpsid; __be16 r2[3]; __be32 r3[2]; __be32 r4; __be32 atrb; __be16 vlan[16]; } atrb; } mps; struct fw_ldst_func { __u8 access_ctl; __u8 mod_index; __be16 ctl_id; __be32 offset; __be64 data0; __be64 data1; } func; struct fw_ldst_pcie { __u8 ctrl_to_fn; __u8 bnum; __u8 r; __u8 ext_r; __u8 select_naccess; __u8 pcie_fn; __be16 nset_pkd; __be32 data[12]; } pcie; struct fw_ldst_i2c_deprecated { __u8 pid_pkd; __u8 base; __u8 boffset; __u8 data; __be32 r9; } i2c_deprecated; struct fw_ldst_i2c { __u8 pid; __u8 did; __u8 boffset; __u8 blen; __be32 r9; __u8 data[48]; } i2c; struct fw_ldst_le { __be32 index; __be32 r9; __u8 val[33]; __u8 r11[7]; } le; } u; }; #define S_FW_LDST_CMD_ADDRSPACE 0 #define M_FW_LDST_CMD_ADDRSPACE 0xff #define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE) #define G_FW_LDST_CMD_ADDRSPACE(x) \ (((x) >> S_FW_LDST_CMD_ADDRSPACE) & M_FW_LDST_CMD_ADDRSPACE) #define S_FW_LDST_CMD_CYCLES 16 #define M_FW_LDST_CMD_CYCLES 0xffff #define 
V_FW_LDST_CMD_CYCLES(x) ((x) << S_FW_LDST_CMD_CYCLES) #define G_FW_LDST_CMD_CYCLES(x) \ (((x) >> S_FW_LDST_CMD_CYCLES) & M_FW_LDST_CMD_CYCLES) #define S_FW_LDST_CMD_MSG 31 #define M_FW_LDST_CMD_MSG 0x1 #define V_FW_LDST_CMD_MSG(x) ((x) << S_FW_LDST_CMD_MSG) #define G_FW_LDST_CMD_MSG(x) \ (((x) >> S_FW_LDST_CMD_MSG) & M_FW_LDST_CMD_MSG) #define F_FW_LDST_CMD_MSG V_FW_LDST_CMD_MSG(1U) #define S_FW_LDST_CMD_CTXTFLUSH 30 #define M_FW_LDST_CMD_CTXTFLUSH 0x1 #define V_FW_LDST_CMD_CTXTFLUSH(x) ((x) << S_FW_LDST_CMD_CTXTFLUSH) #define G_FW_LDST_CMD_CTXTFLUSH(x) \ (((x) >> S_FW_LDST_CMD_CTXTFLUSH) & M_FW_LDST_CMD_CTXTFLUSH) #define F_FW_LDST_CMD_CTXTFLUSH V_FW_LDST_CMD_CTXTFLUSH(1U) #define S_FW_LDST_CMD_PADDR 8 #define M_FW_LDST_CMD_PADDR 0x1f #define V_FW_LDST_CMD_PADDR(x) ((x) << S_FW_LDST_CMD_PADDR) #define G_FW_LDST_CMD_PADDR(x) \ (((x) >> S_FW_LDST_CMD_PADDR) & M_FW_LDST_CMD_PADDR) #define S_FW_LDST_CMD_MMD 0 #define M_FW_LDST_CMD_MMD 0x1f #define V_FW_LDST_CMD_MMD(x) ((x) << S_FW_LDST_CMD_MMD) #define G_FW_LDST_CMD_MMD(x) \ (((x) >> S_FW_LDST_CMD_MMD) & M_FW_LDST_CMD_MMD) #define S_FW_LDST_CMD_FID 15 #define M_FW_LDST_CMD_FID 0x1 #define V_FW_LDST_CMD_FID(x) ((x) << S_FW_LDST_CMD_FID) #define G_FW_LDST_CMD_FID(x) \ (((x) >> S_FW_LDST_CMD_FID) & M_FW_LDST_CMD_FID) #define F_FW_LDST_CMD_FID V_FW_LDST_CMD_FID(1U) #define S_FW_LDST_CMD_IDX 0 #define M_FW_LDST_CMD_IDX 0x7fff #define V_FW_LDST_CMD_IDX(x) ((x) << S_FW_LDST_CMD_IDX) #define G_FW_LDST_CMD_IDX(x) \ (((x) >> S_FW_LDST_CMD_IDX) & M_FW_LDST_CMD_IDX) #define S_FW_LDST_CMD_RPLCPF 0 #define M_FW_LDST_CMD_RPLCPF 0xff #define V_FW_LDST_CMD_RPLCPF(x) ((x) << S_FW_LDST_CMD_RPLCPF) #define G_FW_LDST_CMD_RPLCPF(x) \ (((x) >> S_FW_LDST_CMD_RPLCPF) & M_FW_LDST_CMD_RPLCPF) #define S_FW_LDST_CMD_MPSID 0 #define M_FW_LDST_CMD_MPSID 0x7fff #define V_FW_LDST_CMD_MPSID(x) ((x) << S_FW_LDST_CMD_MPSID) #define G_FW_LDST_CMD_MPSID(x) \ (((x) >> S_FW_LDST_CMD_MPSID) & M_FW_LDST_CMD_MPSID) #define S_FW_LDST_CMD_CTRL 7 #define 
M_FW_LDST_CMD_CTRL 0x1 #define V_FW_LDST_CMD_CTRL(x) ((x) << S_FW_LDST_CMD_CTRL) #define G_FW_LDST_CMD_CTRL(x) \ (((x) >> S_FW_LDST_CMD_CTRL) & M_FW_LDST_CMD_CTRL) #define F_FW_LDST_CMD_CTRL V_FW_LDST_CMD_CTRL(1U) #define S_FW_LDST_CMD_LC 4 #define M_FW_LDST_CMD_LC 0x1 #define V_FW_LDST_CMD_LC(x) ((x) << S_FW_LDST_CMD_LC) #define G_FW_LDST_CMD_LC(x) \ (((x) >> S_FW_LDST_CMD_LC) & M_FW_LDST_CMD_LC) #define F_FW_LDST_CMD_LC V_FW_LDST_CMD_LC(1U) #define S_FW_LDST_CMD_AI 3 #define M_FW_LDST_CMD_AI 0x1 #define V_FW_LDST_CMD_AI(x) ((x) << S_FW_LDST_CMD_AI) #define G_FW_LDST_CMD_AI(x) \ (((x) >> S_FW_LDST_CMD_AI) & M_FW_LDST_CMD_AI) #define F_FW_LDST_CMD_AI V_FW_LDST_CMD_AI(1U) #define S_FW_LDST_CMD_FN 0 #define M_FW_LDST_CMD_FN 0x7 #define V_FW_LDST_CMD_FN(x) ((x) << S_FW_LDST_CMD_FN) #define G_FW_LDST_CMD_FN(x) \ (((x) >> S_FW_LDST_CMD_FN) & M_FW_LDST_CMD_FN) #define S_FW_LDST_CMD_SELECT 4 #define M_FW_LDST_CMD_SELECT 0xf #define V_FW_LDST_CMD_SELECT(x) ((x) << S_FW_LDST_CMD_SELECT) #define G_FW_LDST_CMD_SELECT(x) \ (((x) >> S_FW_LDST_CMD_SELECT) & M_FW_LDST_CMD_SELECT) #define S_FW_LDST_CMD_NACCESS 0 #define M_FW_LDST_CMD_NACCESS 0xf #define V_FW_LDST_CMD_NACCESS(x) ((x) << S_FW_LDST_CMD_NACCESS) #define G_FW_LDST_CMD_NACCESS(x) \ (((x) >> S_FW_LDST_CMD_NACCESS) & M_FW_LDST_CMD_NACCESS) #define S_FW_LDST_CMD_NSET 14 #define M_FW_LDST_CMD_NSET 0x3 #define V_FW_LDST_CMD_NSET(x) ((x) << S_FW_LDST_CMD_NSET) #define G_FW_LDST_CMD_NSET(x) \ (((x) >> S_FW_LDST_CMD_NSET) & M_FW_LDST_CMD_NSET) #define S_FW_LDST_CMD_PID 6 #define M_FW_LDST_CMD_PID 0x3 #define V_FW_LDST_CMD_PID(x) ((x) << S_FW_LDST_CMD_PID) #define G_FW_LDST_CMD_PID(x) \ (((x) >> S_FW_LDST_CMD_PID) & M_FW_LDST_CMD_PID) struct fw_reset_cmd { __be32 op_to_write; __be32 retval_len16; __be32 val; __be32 halt_pkd; }; #define S_FW_RESET_CMD_HALT 31 #define M_FW_RESET_CMD_HALT 0x1 #define V_FW_RESET_CMD_HALT(x) ((x) << S_FW_RESET_CMD_HALT) #define G_FW_RESET_CMD_HALT(x) \ (((x) >> S_FW_RESET_CMD_HALT) & 
M_FW_RESET_CMD_HALT) #define F_FW_RESET_CMD_HALT V_FW_RESET_CMD_HALT(1U) enum { FW_HELLO_CMD_STAGE_OS = 0, FW_HELLO_CMD_STAGE_PREOS0 = 1, FW_HELLO_CMD_STAGE_PREOS1 = 2, FW_HELLO_CMD_STAGE_POSTOS = 3, }; struct fw_hello_cmd { __be32 op_to_write; __be32 retval_len16; __be32 err_to_clearinit; __be32 fwrev; }; #define S_FW_HELLO_CMD_ERR 31 #define M_FW_HELLO_CMD_ERR 0x1 #define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR) #define G_FW_HELLO_CMD_ERR(x) \ (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR) #define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U) #define S_FW_HELLO_CMD_INIT 30 #define M_FW_HELLO_CMD_INIT 0x1 #define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT) #define G_FW_HELLO_CMD_INIT(x) \ (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT) #define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U) #define S_FW_HELLO_CMD_MASTERDIS 29 #define M_FW_HELLO_CMD_MASTERDIS 0x1 #define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS) #define G_FW_HELLO_CMD_MASTERDIS(x) \ (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS) #define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U) #define S_FW_HELLO_CMD_MASTERFORCE 28 #define M_FW_HELLO_CMD_MASTERFORCE 0x1 #define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE) #define G_FW_HELLO_CMD_MASTERFORCE(x) \ (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE) #define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U) #define S_FW_HELLO_CMD_MBMASTER 24 #define M_FW_HELLO_CMD_MBMASTER 0xf #define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER) #define G_FW_HELLO_CMD_MBMASTER(x) \ (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER) #define S_FW_HELLO_CMD_MBASYNCNOTINT 23 #define M_FW_HELLO_CMD_MBASYNCNOTINT 0x1 #define V_FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOTINT) #define G_FW_HELLO_CMD_MBASYNCNOTINT(x) \ (((x) >> S_FW_HELLO_CMD_MBASYNCNOTINT) & M_FW_HELLO_CMD_MBASYNCNOTINT) #define F_FW_HELLO_CMD_MBASYNCNOTINT 
V_FW_HELLO_CMD_MBASYNCNOTINT(1U) #define S_FW_HELLO_CMD_MBASYNCNOT 20 #define M_FW_HELLO_CMD_MBASYNCNOT 0x7 #define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT) #define G_FW_HELLO_CMD_MBASYNCNOT(x) \ (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT) #define S_FW_HELLO_CMD_STAGE 17 #define M_FW_HELLO_CMD_STAGE 0x7 #define V_FW_HELLO_CMD_STAGE(x) ((x) << S_FW_HELLO_CMD_STAGE) #define G_FW_HELLO_CMD_STAGE(x) \ (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE) #define S_FW_HELLO_CMD_CLEARINIT 16 #define M_FW_HELLO_CMD_CLEARINIT 0x1 #define V_FW_HELLO_CMD_CLEARINIT(x) ((x) << S_FW_HELLO_CMD_CLEARINIT) #define G_FW_HELLO_CMD_CLEARINIT(x) \ (((x) >> S_FW_HELLO_CMD_CLEARINIT) & M_FW_HELLO_CMD_CLEARINIT) #define F_FW_HELLO_CMD_CLEARINIT V_FW_HELLO_CMD_CLEARINIT(1U) struct fw_bye_cmd { __be32 op_to_write; __be32 retval_len16; __be64 r3; }; struct fw_initialize_cmd { __be32 op_to_write; __be32 retval_len16; __be64 r3; }; enum fw_caps_config_hm { FW_CAPS_CONFIG_HM_PCIE = 0x00000001, FW_CAPS_CONFIG_HM_PL = 0x00000002, FW_CAPS_CONFIG_HM_SGE = 0x00000004, FW_CAPS_CONFIG_HM_CIM = 0x00000008, FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, FW_CAPS_CONFIG_HM_TP = 0x00000020, FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, FW_CAPS_CONFIG_HM_PMRX = 0x00000080, FW_CAPS_CONFIG_HM_PMTX = 0x00000100, FW_CAPS_CONFIG_HM_MC = 0x00000200, FW_CAPS_CONFIG_HM_LE = 0x00000400, FW_CAPS_CONFIG_HM_MPS = 0x00000800, FW_CAPS_CONFIG_HM_XGMAC = 0x00001000, FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, FW_CAPS_CONFIG_HM_MI = 0x00008000, FW_CAPS_CONFIG_HM_I2CM = 0x00010000, FW_CAPS_CONFIG_HM_NCSI = 0x00020000, FW_CAPS_CONFIG_HM_SMB = 0x00040000, FW_CAPS_CONFIG_HM_MA = 0x00080000, FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, FW_CAPS_CONFIG_HM_PMU = 0x00200000, FW_CAPS_CONFIG_HM_UART = 0x00400000, FW_CAPS_CONFIG_HM_SF = 0x00800000, }; /* * The VF Register Map. 
* * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local * bus module (PL) and CPU Interface Module (CIM) components are mapped via * the Slice to Module Map Table (see below) in the Physical Function Register * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base * and Offset registers in the PF Register Map. The MBDATA base address is * quite constrained as it determines the Mailbox Data addresses for both PFs * and VFs, and therefore must fit in both the VF and PF Register Maps without * overlapping other registers. */ #define FW_T4VF_SGE_BASE_ADDR 0x0000 #define FW_T4VF_MPS_BASE_ADDR 0x0100 #define FW_T4VF_PL_BASE_ADDR 0x0200 #define FW_T4VF_MBDATA_BASE_ADDR 0x0240 #define FW_T6VF_MBDATA_BASE_ADDR 0x0280 /* aligned to mbox size 128B */ #define FW_T4VF_CIM_BASE_ADDR 0x0300 #define FW_T4VF_REGMAP_START 0x0000 #define FW_T4VF_REGMAP_SIZE 0x0400 enum fw_caps_config_nbm { FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, }; enum fw_caps_config_link { FW_CAPS_CONFIG_LINK_PPP = 0x00000001, FW_CAPS_CONFIG_LINK_QFC = 0x00000002, FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, }; enum fw_caps_config_switch { FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, }; enum fw_caps_config_nic { FW_CAPS_CONFIG_NIC = 0x00000001, FW_CAPS_CONFIG_NIC_VM = 0x00000002, FW_CAPS_CONFIG_NIC_IDS = 0x00000004, FW_CAPS_CONFIG_NIC_UM = 0x00000008, FW_CAPS_CONFIG_NIC_UM_ISGL = 0x00000010, FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020, FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040, }; enum fw_caps_config_toe { FW_CAPS_CONFIG_TOE = 0x00000001, }; enum fw_caps_config_rdma { FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, }; enum fw_caps_config_iscsi { FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, 
FW_CAPS_CONFIG_ISCSI_INITIATOR_SSNOFLD = 0x00000010, FW_CAPS_CONFIG_ISCSI_TARGET_SSNOFLD = 0x00000020, FW_CAPS_CONFIG_ISCSI_T10DIF = 0x00000040, FW_CAPS_CONFIG_ISCSI_INITIATOR_CMDOFLD = 0x00000080, FW_CAPS_CONFIG_ISCSI_TARGET_CMDOFLD = 0x00000100, }; enum fw_caps_config_crypto { FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001, FW_CAPS_CONFIG_TLSKEYS = 0x00000002, FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004, FW_CAPS_CONFIG_TLS_HW = 0x00000008, }; enum fw_caps_config_fcoe { FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, FW_CAPS_CONFIG_FCOE_CTRL_OFLD = 0x00000004, FW_CAPS_CONFIG_POFCOE_INITIATOR = 0x00000008, FW_CAPS_CONFIG_POFCOE_TARGET = 0x00000010, }; enum fw_memtype_cf { FW_MEMTYPE_CF_EDC0 = FW_MEMTYPE_EDC0, FW_MEMTYPE_CF_EDC1 = FW_MEMTYPE_EDC1, FW_MEMTYPE_CF_EXTMEM = FW_MEMTYPE_EXTMEM, FW_MEMTYPE_CF_FLASH = FW_MEMTYPE_FLASH, FW_MEMTYPE_CF_INTERNAL = FW_MEMTYPE_INTERNAL, FW_MEMTYPE_CF_EXTMEM1 = FW_MEMTYPE_EXTMEM1, }; struct fw_caps_config_cmd { __be32 op_to_write; __be32 cfvalid_to_len16; __be32 r2; __be32 hwmbitmap; __be16 nbmcaps; __be16 linkcaps; __be16 switchcaps; __be16 r3; __be16 niccaps; __be16 toecaps; __be16 rdmacaps; __be16 cryptocaps; __be16 iscsicaps; __be16 fcoecaps; __be32 cfcsum; __be32 finiver; __be32 finicsum; }; #define S_FW_CAPS_CONFIG_CMD_CFVALID 27 #define M_FW_CAPS_CONFIG_CMD_CFVALID 0x1 #define V_FW_CAPS_CONFIG_CMD_CFVALID(x) ((x) << S_FW_CAPS_CONFIG_CMD_CFVALID) #define G_FW_CAPS_CONFIG_CMD_CFVALID(x) \ (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID) #define F_FW_CAPS_CONFIG_CMD_CFVALID V_FW_CAPS_CONFIG_CMD_CFVALID(1U) #define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 24 #define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 0x7 #define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) #define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \ M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) #define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16 #define 
M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff #define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) #define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \ M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) /* * params command mnemonics */ enum fw_params_mnem { FW_PARAMS_MNEM_DEV = 1, /* device params */ FW_PARAMS_MNEM_PFVF = 2, /* function params */ FW_PARAMS_MNEM_REG = 3, /* limited register access */ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ FW_PARAMS_MNEM_CHNET = 5, /* chnet params */ FW_PARAMS_MNEM_LAST }; /* * device parameters */ #define S_FW_PARAMS_PARAM_FILTER_MODE 16 #define M_FW_PARAMS_PARAM_FILTER_MODE 0xffff #define V_FW_PARAMS_PARAM_FILTER_MODE(x) \ ((x) << S_FW_PARAMS_PARAM_FILTER_MODE) #define G_FW_PARAMS_PARAM_FILTER_MODE(x) \ (((x) >> S_FW_PARAMS_PARAM_FILTER_MODE) & \ M_FW_PARAMS_PARAM_FILTER_MODE) #define S_FW_PARAMS_PARAM_FILTER_MASK 0 #define M_FW_PARAMS_PARAM_FILTER_MASK 0xffff #define V_FW_PARAMS_PARAM_FILTER_MASK(x) \ ((x) << S_FW_PARAMS_PARAM_FILTER_MASK) #define G_FW_PARAMS_PARAM_FILTER_MASK(x) \ (((x) >> S_FW_PARAMS_PARAM_FILTER_MASK) & \ M_FW_PARAMS_PARAM_FILTER_MASK) enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs * allocated by the device's * Lookup Engine */ FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, FW_PARAMS_PARAM_DEV_INTFVER_NIC = 0x04, FW_PARAMS_PARAM_DEV_INTFVER_VNIC = 0x05, FW_PARAMS_PARAM_DEV_INTFVER_OFLD = 0x06, FW_PARAMS_PARAM_DEV_INTFVER_RI = 0x07, FW_PARAMS_PARAM_DEV_INTFVER_ISCSIPDU = 0x08, FW_PARAMS_PARAM_DEV_INTFVER_ISCSI = 0x09, FW_PARAMS_PARAM_DEV_INTFVER_FCOE = 0x0A, FW_PARAMS_PARAM_DEV_FWREV = 0x0B, FW_PARAMS_PARAM_DEV_TPREV = 0x0C, FW_PARAMS_PARAM_DEV_CF = 0x0D, FW_PARAMS_PARAM_DEV_BYPASS = 0x0E, FW_PARAMS_PARAM_DEV_PHYFW = 0x0F, FW_PARAMS_PARAM_DEV_LOAD = 0x10, FW_PARAMS_PARAM_DEV_DIAG = 
0x11, FW_PARAMS_PARAM_DEV_UCLK = 0x12, /* uP clock in khz */ FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */ FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER= 0x14,/* max supported ADAPTER IRD */ FW_PARAMS_PARAM_DEV_INTFVER_FCOEPDU = 0x15, FW_PARAMS_PARAM_DEV_MCINIT = 0x16, FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, FW_PARAMS_PARAM_DEV_FWCACHE = 0x18, FW_PARAMS_PARAM_DEV_RSSINFO = 0x19, FW_PARAMS_PARAM_DEV_SCFGREV = 0x1A, FW_PARAMS_PARAM_DEV_VPDREV = 0x1B, FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C, FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D, FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E, FW_PARAMS_PARAM_DEV_TPCHMAP = 0x1F, FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20, FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21, FW_PARAMS_PARAM_DEV_RING_BACKBONE = 0x22, FW_PARAMS_PARAM_DEV_PPOD_EDRAM = 0x23, FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24, FW_PARAMS_PARAM_DEV_ADD_SMAC = 0x25, FW_PARAMS_PARAM_DEV_HPFILTER_REGION_SUPPORT = 0x26, FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27, FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28, FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29, FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A, FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B, FW_PARAMS_PARAM_DEV_VF_TRVLAN = 0x2C, FW_PARAMS_PARAM_DEV_TCB_CACHE_FLUSH = 0x2D, FW_PARAMS_PARAM_DEV_FILTER = 0x2E, FW_PARAMS_PARAM_DEV_CLIP2_CMD = 0x2F, FW_PARAMS_PARAM_DEV_KTLS_HW = 0x31, }; /* * dev bypass parameters; actions and modes */ enum fw_params_param_dev_bypass { /* actions */ FW_PARAMS_PARAM_DEV_BYPASS_PFAIL = 0x00, FW_PARAMS_PARAM_DEV_BYPASS_CURRENT = 0x01, /* modes */ FW_PARAMS_PARAM_DEV_BYPASS_NORMAL = 0x00, FW_PARAMS_PARAM_DEV_BYPASS_DROP = 0x1, FW_PARAMS_PARAM_DEV_BYPASS_BYPASS = 0x2, }; enum fw_params_param_dev_phyfw { FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00, FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01, }; enum fw_params_param_dev_diag { FW_PARAM_DEV_DIAG_TMP = 0x00, FW_PARAM_DEV_DIAG_VDD = 0x01, FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02, FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR = 0x03, }; enum fw_params_param_dev_filter{ 
FW_PARAM_DEV_FILTER_VNIC_MODE = 0x00, FW_PARAM_DEV_FILTER_MODE_MASK = 0x01, /* VNIC modes */ FW_VNIC_MODE_PF_VF = 0, FW_VNIC_MODE_OUTER_VLAN = 1, FW_VNIC_MODE_ENCAP_EN = 2, }; enum fw_params_param_dev_ktls_hw { FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE = 0x00, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE = 0x01, FW_PARAMS_PARAM_DEV_KTLS_HW_USER_DISABLE = 0x00, FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE = 0x01, }; enum fw_params_param_dev_fwcache { FW_PARAM_DEV_FWCACHE_FLUSH = 0x00, FW_PARAM_DEV_FWCACHE_FLUSHINV = 0x01, }; /* * physical and virtual function parameters */ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15, FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16, FW_PARAMS_PARAM_PFVF_CQ_START = 0x17, FW_PARAMS_PARAM_PFVF_CQ_END = 0x18, FW_PARAMS_PARAM_PFVF_SRQ_START = 0x19, FW_PARAMS_PARAM_PFVF_SRQ_END = 0x1A, FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, FW_PARAMS_PARAM_PFVF_VIID = 0x24, FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26, FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27, FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28, FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29, FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A, FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, 
FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D, FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E, FW_PARAMS_PARAM_PFVF_ETHOFLD_START = 0x2F, FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30, FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32, FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33, FW_PARAMS_PARAM_PFVF_TLS_START = 0x34, FW_PARAMS_PARAM_PFVF_TLS_END = 0x35, FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36, FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37, FW_PARAMS_PARAM_PFVF_RSSKEYINFO = 0x38, FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39, FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, FW_PARAMS_PARAM_PFVF_PPOD_EDRAM_START = 0x3B, FW_PARAMS_PARAM_PFVF_PPOD_EDRAM_END = 0x3C, FW_PARAMS_PARAM_PFVF_MAX_PKTS_PER_ETH_TX_PKTS_WR = 0x3D, FW_PARAMS_PARAM_PFVF_GET_SMT_START = 0x3E, FW_PARAMS_PARAM_PFVF_GET_SMT_SIZE = 0x3F, FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40, }; /* * virtual link state as seen by the specified VF */ enum vf_link_states { VF_LINK_STATE_AUTO = 0x00, VF_LINK_STATE_ENABLE = 0x01, VF_LINK_STATE_DISABLE = 0x02, }; /* * dma queue parameters */ enum fw_params_param_dmaq { FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, FW_PARAMS_PARAM_DMAQ_IQ_INTIDX = 0x02, FW_PARAMS_PARAM_DMAQ_IQ_DCA = 0x03, FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, FW_PARAMS_PARAM_DMAQ_EQ_DCA = 0x14, FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX = 0x15, FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20, FW_PARAMS_PARAM_DMAQ_FLM_DCA = 0x30 }; /* * chnet parameters */ enum fw_params_param_chnet { FW_PARAMS_PARAM_CHNET_FLAGS = 0x00, }; enum fw_params_param_chnet_flags { FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_IPV6 = 0x1, FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_DAD = 0x2, FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_MLDV2= 0x4, FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_IPV6_SLAAC = 0x8, }; #define S_FW_PARAMS_MNEM 24 #define 
M_FW_PARAMS_MNEM 0xff #define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM) #define G_FW_PARAMS_MNEM(x) \ (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM) #define S_FW_PARAMS_PARAM_X 16 #define M_FW_PARAMS_PARAM_X 0xff #define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X) #define G_FW_PARAMS_PARAM_X(x) \ (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X) #define S_FW_PARAMS_PARAM_Y 8 #define M_FW_PARAMS_PARAM_Y 0xff #define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y) #define G_FW_PARAMS_PARAM_Y(x) \ (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y) #define S_FW_PARAMS_PARAM_Z 0 #define M_FW_PARAMS_PARAM_Z 0xff #define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z) #define G_FW_PARAMS_PARAM_Z(x) \ (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z) #define S_FW_PARAMS_PARAM_XYZ 0 #define M_FW_PARAMS_PARAM_XYZ 0xffffff #define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ) #define G_FW_PARAMS_PARAM_XYZ(x) \ (((x) >> S_FW_PARAMS_PARAM_XYZ) & M_FW_PARAMS_PARAM_XYZ) #define S_FW_PARAMS_PARAM_YZ 0 #define M_FW_PARAMS_PARAM_YZ 0xffff #define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ) #define G_FW_PARAMS_PARAM_YZ(x) \ (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ) #define S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN 31 #define M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN 0x1 #define V_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN) #define G_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN) & \ M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN) #define S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT 24 #define M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT 0x3 #define V_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT) #define G_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT) & \ M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT) #define S_FW_PARAMS_PARAM_DMAQ_DCA_ST 0 #define M_FW_PARAMS_PARAM_DMAQ_DCA_ST 0x7ff #define V_FW_PARAMS_PARAM_DMAQ_DCA_ST(x) \ ((x) << 
S_FW_PARAMS_PARAM_DMAQ_DCA_ST) #define G_FW_PARAMS_PARAM_DMAQ_DCA_ST(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_ST) & M_FW_PARAMS_PARAM_DMAQ_DCA_ST) #define S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE 29 #define M_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE 0x7 #define V_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE) #define G_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE) & \ M_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE) #define S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX 0 #define M_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX 0x3ff #define V_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX(x) \ ((x) << S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX) #define G_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX(x) \ (((x) >> S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX) & \ M_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX) struct fw_params_cmd { __be32 op_to_vfn; __be32 retval_len16; struct fw_params_param { __be32 mnem; __be32 val; } param[7]; }; #define S_FW_PARAMS_CMD_PFN 8 #define M_FW_PARAMS_CMD_PFN 0x7 #define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN) #define G_FW_PARAMS_CMD_PFN(x) \ (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN) #define S_FW_PARAMS_CMD_VFN 0 #define M_FW_PARAMS_CMD_VFN 0xff #define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN) #define G_FW_PARAMS_CMD_VFN(x) \ (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN) struct fw_pfvf_cmd { __be32 op_to_vfn; __be32 retval_len16; __be32 niqflint_niq; __be32 type_to_neq; __be32 tc_to_nexactf; __be32 r_caps_to_nethctrl; __be16 nricq; __be16 nriqp; __be32 r4; }; #define S_FW_PFVF_CMD_PFN 8 #define M_FW_PFVF_CMD_PFN 0x7 #define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN) #define G_FW_PFVF_CMD_PFN(x) \ (((x) >> S_FW_PFVF_CMD_PFN) & M_FW_PFVF_CMD_PFN) #define S_FW_PFVF_CMD_VFN 0 #define M_FW_PFVF_CMD_VFN 0xff #define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN) #define G_FW_PFVF_CMD_VFN(x) \ (((x) >> S_FW_PFVF_CMD_VFN) & M_FW_PFVF_CMD_VFN) #define S_FW_PFVF_CMD_NIQFLINT 20 #define M_FW_PFVF_CMD_NIQFLINT 
0xfff #define V_FW_PFVF_CMD_NIQFLINT(x) ((x) << S_FW_PFVF_CMD_NIQFLINT) #define G_FW_PFVF_CMD_NIQFLINT(x) \ (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT) #define S_FW_PFVF_CMD_NIQ 0 #define M_FW_PFVF_CMD_NIQ 0xfffff #define V_FW_PFVF_CMD_NIQ(x) ((x) << S_FW_PFVF_CMD_NIQ) #define G_FW_PFVF_CMD_NIQ(x) \ (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ) #define S_FW_PFVF_CMD_TYPE 31 #define M_FW_PFVF_CMD_TYPE 0x1 #define V_FW_PFVF_CMD_TYPE(x) ((x) << S_FW_PFVF_CMD_TYPE) #define G_FW_PFVF_CMD_TYPE(x) \ (((x) >> S_FW_PFVF_CMD_TYPE) & M_FW_PFVF_CMD_TYPE) #define F_FW_PFVF_CMD_TYPE V_FW_PFVF_CMD_TYPE(1U) #define S_FW_PFVF_CMD_CMASK 24 #define M_FW_PFVF_CMD_CMASK 0xf #define V_FW_PFVF_CMD_CMASK(x) ((x) << S_FW_PFVF_CMD_CMASK) #define G_FW_PFVF_CMD_CMASK(x) \ (((x) >> S_FW_PFVF_CMD_CMASK) & M_FW_PFVF_CMD_CMASK) #define S_FW_PFVF_CMD_PMASK 20 #define M_FW_PFVF_CMD_PMASK 0xf #define V_FW_PFVF_CMD_PMASK(x) ((x) << S_FW_PFVF_CMD_PMASK) #define G_FW_PFVF_CMD_PMASK(x) \ (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK) #define S_FW_PFVF_CMD_NEQ 0 #define M_FW_PFVF_CMD_NEQ 0xfffff #define V_FW_PFVF_CMD_NEQ(x) ((x) << S_FW_PFVF_CMD_NEQ) #define G_FW_PFVF_CMD_NEQ(x) \ (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ) #define S_FW_PFVF_CMD_TC 24 #define M_FW_PFVF_CMD_TC 0xff #define V_FW_PFVF_CMD_TC(x) ((x) << S_FW_PFVF_CMD_TC) #define G_FW_PFVF_CMD_TC(x) \ (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC) #define S_FW_PFVF_CMD_NVI 16 #define M_FW_PFVF_CMD_NVI 0xff #define V_FW_PFVF_CMD_NVI(x) ((x) << S_FW_PFVF_CMD_NVI) #define G_FW_PFVF_CMD_NVI(x) \ (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI) #define S_FW_PFVF_CMD_NEXACTF 0 #define M_FW_PFVF_CMD_NEXACTF 0xffff #define V_FW_PFVF_CMD_NEXACTF(x) ((x) << S_FW_PFVF_CMD_NEXACTF) #define G_FW_PFVF_CMD_NEXACTF(x) \ (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF) #define S_FW_PFVF_CMD_R_CAPS 24 #define M_FW_PFVF_CMD_R_CAPS 0xff #define V_FW_PFVF_CMD_R_CAPS(x) ((x) << S_FW_PFVF_CMD_R_CAPS) #define 
G_FW_PFVF_CMD_R_CAPS(x) \ (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS) #define S_FW_PFVF_CMD_WX_CAPS 16 #define M_FW_PFVF_CMD_WX_CAPS 0xff #define V_FW_PFVF_CMD_WX_CAPS(x) ((x) << S_FW_PFVF_CMD_WX_CAPS) #define G_FW_PFVF_CMD_WX_CAPS(x) \ (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS) #define S_FW_PFVF_CMD_NETHCTRL 0 #define M_FW_PFVF_CMD_NETHCTRL 0xffff #define V_FW_PFVF_CMD_NETHCTRL(x) ((x) << S_FW_PFVF_CMD_NETHCTRL) #define G_FW_PFVF_CMD_NETHCTRL(x) \ (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL) /* * ingress queue type; the first 1K ingress queues can have associated 0, * 1 or 2 free lists and an interrupt, all other ingress queues lack these * capabilities */ enum fw_iq_type { FW_IQ_TYPE_FL_INT_CAP, FW_IQ_TYPE_NO_FL_INT_CAP, FW_IQ_TYPE_VF_CQ }; enum fw_iq_iqtype { FW_IQ_IQTYPE_OTHER, FW_IQ_IQTYPE_NIC, FW_IQ_IQTYPE_OFLD, }; struct fw_iq_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be16 physiqid; __be16 iqid; __be16 fl0id; __be16 fl1id; __be32 type_to_iqandstindex; __be16 iqdroprss_to_iqesize; __be16 iqsize; __be64 iqaddr; __be32 iqns_to_fl0congen; __be16 fl0dcaen_to_fl0cidxfthresh; __be16 fl0size; __be64 fl0addr; __be32 fl1cngchmap_to_fl1congen; __be16 fl1dcaen_to_fl1cidxfthresh; __be16 fl1size; __be64 fl1addr; }; #define S_FW_IQ_CMD_PFN 8 #define M_FW_IQ_CMD_PFN 0x7 #define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN) #define G_FW_IQ_CMD_PFN(x) \ (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN) #define S_FW_IQ_CMD_VFN 0 #define M_FW_IQ_CMD_VFN 0xff #define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN) #define G_FW_IQ_CMD_VFN(x) \ (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN) #define S_FW_IQ_CMD_ALLOC 31 #define M_FW_IQ_CMD_ALLOC 0x1 #define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC) #define G_FW_IQ_CMD_ALLOC(x) \ (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC) #define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U) #define S_FW_IQ_CMD_FREE 30 #define M_FW_IQ_CMD_FREE 0x1 #define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE) #define 
G_FW_IQ_CMD_FREE(x) \ (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE) #define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U) #define S_FW_IQ_CMD_MODIFY 29 #define M_FW_IQ_CMD_MODIFY 0x1 #define V_FW_IQ_CMD_MODIFY(x) ((x) << S_FW_IQ_CMD_MODIFY) #define G_FW_IQ_CMD_MODIFY(x) \ (((x) >> S_FW_IQ_CMD_MODIFY) & M_FW_IQ_CMD_MODIFY) #define F_FW_IQ_CMD_MODIFY V_FW_IQ_CMD_MODIFY(1U) #define S_FW_IQ_CMD_IQSTART 28 #define M_FW_IQ_CMD_IQSTART 0x1 #define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART) #define G_FW_IQ_CMD_IQSTART(x) \ (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART) #define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U) #define S_FW_IQ_CMD_IQSTOP 27 #define M_FW_IQ_CMD_IQSTOP 0x1 #define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP) #define G_FW_IQ_CMD_IQSTOP(x) \ (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP) #define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U) #define S_FW_IQ_CMD_TYPE 29 #define M_FW_IQ_CMD_TYPE 0x7 #define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE) #define G_FW_IQ_CMD_TYPE(x) \ (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE) #define S_FW_IQ_CMD_IQASYNCH 28 #define M_FW_IQ_CMD_IQASYNCH 0x1 #define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH) #define G_FW_IQ_CMD_IQASYNCH(x) \ (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH) #define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U) #define S_FW_IQ_CMD_VIID 16 #define M_FW_IQ_CMD_VIID 0xfff #define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID) #define G_FW_IQ_CMD_VIID(x) \ (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID) #define S_FW_IQ_CMD_IQANDST 15 #define M_FW_IQ_CMD_IQANDST 0x1 #define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST) #define G_FW_IQ_CMD_IQANDST(x) \ (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST) #define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U) #define S_FW_IQ_CMD_IQANUS 14 #define M_FW_IQ_CMD_IQANUS 0x1 #define V_FW_IQ_CMD_IQANUS(x) ((x) << S_FW_IQ_CMD_IQANUS) #define G_FW_IQ_CMD_IQANUS(x) \ (((x) >> S_FW_IQ_CMD_IQANUS) & M_FW_IQ_CMD_IQANUS) #define 
F_FW_IQ_CMD_IQANUS V_FW_IQ_CMD_IQANUS(1U) #define S_FW_IQ_CMD_IQANUD 12 #define M_FW_IQ_CMD_IQANUD 0x3 #define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD) #define G_FW_IQ_CMD_IQANUD(x) \ (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD) #define S_FW_IQ_CMD_IQANDSTINDEX 0 #define M_FW_IQ_CMD_IQANDSTINDEX 0xfff #define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX) #define G_FW_IQ_CMD_IQANDSTINDEX(x) \ (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX) #define S_FW_IQ_CMD_IQDROPRSS 15 #define M_FW_IQ_CMD_IQDROPRSS 0x1 #define V_FW_IQ_CMD_IQDROPRSS(x) ((x) << S_FW_IQ_CMD_IQDROPRSS) #define G_FW_IQ_CMD_IQDROPRSS(x) \ (((x) >> S_FW_IQ_CMD_IQDROPRSS) & M_FW_IQ_CMD_IQDROPRSS) #define F_FW_IQ_CMD_IQDROPRSS V_FW_IQ_CMD_IQDROPRSS(1U) #define S_FW_IQ_CMD_IQGTSMODE 14 #define M_FW_IQ_CMD_IQGTSMODE 0x1 #define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE) #define G_FW_IQ_CMD_IQGTSMODE(x) \ (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE) #define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U) #define S_FW_IQ_CMD_IQPCIECH 12 #define M_FW_IQ_CMD_IQPCIECH 0x3 #define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH) #define G_FW_IQ_CMD_IQPCIECH(x) \ (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH) #define S_FW_IQ_CMD_IQDCAEN 11 #define M_FW_IQ_CMD_IQDCAEN 0x1 #define V_FW_IQ_CMD_IQDCAEN(x) ((x) << S_FW_IQ_CMD_IQDCAEN) #define G_FW_IQ_CMD_IQDCAEN(x) \ (((x) >> S_FW_IQ_CMD_IQDCAEN) & M_FW_IQ_CMD_IQDCAEN) #define F_FW_IQ_CMD_IQDCAEN V_FW_IQ_CMD_IQDCAEN(1U) #define S_FW_IQ_CMD_IQDCACPU 6 #define M_FW_IQ_CMD_IQDCACPU 0x1f #define V_FW_IQ_CMD_IQDCACPU(x) ((x) << S_FW_IQ_CMD_IQDCACPU) #define G_FW_IQ_CMD_IQDCACPU(x) \ (((x) >> S_FW_IQ_CMD_IQDCACPU) & M_FW_IQ_CMD_IQDCACPU) #define S_FW_IQ_CMD_IQINTCNTTHRESH 4 #define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3 #define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH) #define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \ (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH) 
#define S_FW_IQ_CMD_IQO 3 #define M_FW_IQ_CMD_IQO 0x1 #define V_FW_IQ_CMD_IQO(x) ((x) << S_FW_IQ_CMD_IQO) #define G_FW_IQ_CMD_IQO(x) \ (((x) >> S_FW_IQ_CMD_IQO) & M_FW_IQ_CMD_IQO) #define F_FW_IQ_CMD_IQO V_FW_IQ_CMD_IQO(1U) #define S_FW_IQ_CMD_IQCPRIO 2 #define M_FW_IQ_CMD_IQCPRIO 0x1 #define V_FW_IQ_CMD_IQCPRIO(x) ((x) << S_FW_IQ_CMD_IQCPRIO) #define G_FW_IQ_CMD_IQCPRIO(x) \ (((x) >> S_FW_IQ_CMD_IQCPRIO) & M_FW_IQ_CMD_IQCPRIO) #define F_FW_IQ_CMD_IQCPRIO V_FW_IQ_CMD_IQCPRIO(1U) #define S_FW_IQ_CMD_IQESIZE 0 #define M_FW_IQ_CMD_IQESIZE 0x3 #define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE) #define G_FW_IQ_CMD_IQESIZE(x) \ (((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE) #define S_FW_IQ_CMD_IQNS 31 #define M_FW_IQ_CMD_IQNS 0x1 #define V_FW_IQ_CMD_IQNS(x) ((x) << S_FW_IQ_CMD_IQNS) #define G_FW_IQ_CMD_IQNS(x) \ (((x) >> S_FW_IQ_CMD_IQNS) & M_FW_IQ_CMD_IQNS) #define F_FW_IQ_CMD_IQNS V_FW_IQ_CMD_IQNS(1U) #define S_FW_IQ_CMD_IQRO 30 #define M_FW_IQ_CMD_IQRO 0x1 #define V_FW_IQ_CMD_IQRO(x) ((x) << S_FW_IQ_CMD_IQRO) #define G_FW_IQ_CMD_IQRO(x) \ (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO) #define F_FW_IQ_CMD_IQRO V_FW_IQ_CMD_IQRO(1U) #define S_FW_IQ_CMD_IQFLINTIQHSEN 28 #define M_FW_IQ_CMD_IQFLINTIQHSEN 0x3 #define V_FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << S_FW_IQ_CMD_IQFLINTIQHSEN) #define G_FW_IQ_CMD_IQFLINTIQHSEN(x) \ (((x) >> S_FW_IQ_CMD_IQFLINTIQHSEN) & M_FW_IQ_CMD_IQFLINTIQHSEN) #define S_FW_IQ_CMD_IQFLINTCONGEN 27 #define M_FW_IQ_CMD_IQFLINTCONGEN 0x1 #define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN) #define G_FW_IQ_CMD_IQFLINTCONGEN(x) \ (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN) #define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U) #define S_FW_IQ_CMD_IQFLINTISCSIC 26 #define M_FW_IQ_CMD_IQFLINTISCSIC 0x1 #define V_FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << S_FW_IQ_CMD_IQFLINTISCSIC) #define G_FW_IQ_CMD_IQFLINTISCSIC(x) \ (((x) >> S_FW_IQ_CMD_IQFLINTISCSIC) & M_FW_IQ_CMD_IQFLINTISCSIC) #define 
F_FW_IQ_CMD_IQFLINTISCSIC V_FW_IQ_CMD_IQFLINTISCSIC(1U) #define S_FW_IQ_CMD_IQTYPE 24 #define M_FW_IQ_CMD_IQTYPE 0x3 #define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE) #define G_FW_IQ_CMD_IQTYPE(x) \ (((x) >> S_FW_IQ_CMD_IQTYPE) & M_FW_IQ_CMD_IQTYPE) #define S_FW_IQ_CMD_FL0CNGCHMAP 20 #define M_FW_IQ_CMD_FL0CNGCHMAP 0xf #define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP) #define G_FW_IQ_CMD_FL0CNGCHMAP(x) \ (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP) #define S_FW_IQ_CMD_FL0CONGDROP 16 #define M_FW_IQ_CMD_FL0CONGDROP 0x1 #define V_FW_IQ_CMD_FL0CONGDROP(x) ((x) << S_FW_IQ_CMD_FL0CONGDROP) #define G_FW_IQ_CMD_FL0CONGDROP(x) \ (((x) >> S_FW_IQ_CMD_FL0CONGDROP) & M_FW_IQ_CMD_FL0CONGDROP) #define F_FW_IQ_CMD_FL0CONGDROP V_FW_IQ_CMD_FL0CONGDROP(1U) #define S_FW_IQ_CMD_FL0CACHELOCK 15 #define M_FW_IQ_CMD_FL0CACHELOCK 0x1 #define V_FW_IQ_CMD_FL0CACHELOCK(x) ((x) << S_FW_IQ_CMD_FL0CACHELOCK) #define G_FW_IQ_CMD_FL0CACHELOCK(x) \ (((x) >> S_FW_IQ_CMD_FL0CACHELOCK) & M_FW_IQ_CMD_FL0CACHELOCK) #define F_FW_IQ_CMD_FL0CACHELOCK V_FW_IQ_CMD_FL0CACHELOCK(1U) #define S_FW_IQ_CMD_FL0DBP 14 #define M_FW_IQ_CMD_FL0DBP 0x1 #define V_FW_IQ_CMD_FL0DBP(x) ((x) << S_FW_IQ_CMD_FL0DBP) #define G_FW_IQ_CMD_FL0DBP(x) \ (((x) >> S_FW_IQ_CMD_FL0DBP) & M_FW_IQ_CMD_FL0DBP) #define F_FW_IQ_CMD_FL0DBP V_FW_IQ_CMD_FL0DBP(1U) #define S_FW_IQ_CMD_FL0DATANS 13 #define M_FW_IQ_CMD_FL0DATANS 0x1 #define V_FW_IQ_CMD_FL0DATANS(x) ((x) << S_FW_IQ_CMD_FL0DATANS) #define G_FW_IQ_CMD_FL0DATANS(x) \ (((x) >> S_FW_IQ_CMD_FL0DATANS) & M_FW_IQ_CMD_FL0DATANS) #define F_FW_IQ_CMD_FL0DATANS V_FW_IQ_CMD_FL0DATANS(1U) #define S_FW_IQ_CMD_FL0DATARO 12 #define M_FW_IQ_CMD_FL0DATARO 0x1 #define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO) #define G_FW_IQ_CMD_FL0DATARO(x) \ (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO) #define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U) #define S_FW_IQ_CMD_FL0CONGCIF 11 #define M_FW_IQ_CMD_FL0CONGCIF 0x1 #define 
V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF) #define G_FW_IQ_CMD_FL0CONGCIF(x) \ (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF) #define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U) #define S_FW_IQ_CMD_FL0ONCHIP 10 #define M_FW_IQ_CMD_FL0ONCHIP 0x1 #define V_FW_IQ_CMD_FL0ONCHIP(x) ((x) << S_FW_IQ_CMD_FL0ONCHIP) #define G_FW_IQ_CMD_FL0ONCHIP(x) \ (((x) >> S_FW_IQ_CMD_FL0ONCHIP) & M_FW_IQ_CMD_FL0ONCHIP) #define F_FW_IQ_CMD_FL0ONCHIP V_FW_IQ_CMD_FL0ONCHIP(1U) #define S_FW_IQ_CMD_FL0STATUSPGNS 9 #define M_FW_IQ_CMD_FL0STATUSPGNS 0x1 #define V_FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << S_FW_IQ_CMD_FL0STATUSPGNS) #define G_FW_IQ_CMD_FL0STATUSPGNS(x) \ (((x) >> S_FW_IQ_CMD_FL0STATUSPGNS) & M_FW_IQ_CMD_FL0STATUSPGNS) #define F_FW_IQ_CMD_FL0STATUSPGNS V_FW_IQ_CMD_FL0STATUSPGNS(1U) #define S_FW_IQ_CMD_FL0STATUSPGRO 8 #define M_FW_IQ_CMD_FL0STATUSPGRO 0x1 #define V_FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << S_FW_IQ_CMD_FL0STATUSPGRO) #define G_FW_IQ_CMD_FL0STATUSPGRO(x) \ (((x) >> S_FW_IQ_CMD_FL0STATUSPGRO) & M_FW_IQ_CMD_FL0STATUSPGRO) #define F_FW_IQ_CMD_FL0STATUSPGRO V_FW_IQ_CMD_FL0STATUSPGRO(1U) #define S_FW_IQ_CMD_FL0FETCHNS 7 #define M_FW_IQ_CMD_FL0FETCHNS 0x1 #define V_FW_IQ_CMD_FL0FETCHNS(x) ((x) << S_FW_IQ_CMD_FL0FETCHNS) #define G_FW_IQ_CMD_FL0FETCHNS(x) \ (((x) >> S_FW_IQ_CMD_FL0FETCHNS) & M_FW_IQ_CMD_FL0FETCHNS) #define F_FW_IQ_CMD_FL0FETCHNS V_FW_IQ_CMD_FL0FETCHNS(1U) #define S_FW_IQ_CMD_FL0FETCHRO 6 #define M_FW_IQ_CMD_FL0FETCHRO 0x1 #define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO) #define G_FW_IQ_CMD_FL0FETCHRO(x) \ (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO) #define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U) #define S_FW_IQ_CMD_FL0HOSTFCMODE 4 #define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3 #define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE) #define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \ (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE) #define S_FW_IQ_CMD_FL0CPRIO 3 #define 
M_FW_IQ_CMD_FL0CPRIO 0x1 #define V_FW_IQ_CMD_FL0CPRIO(x) ((x) << S_FW_IQ_CMD_FL0CPRIO) #define G_FW_IQ_CMD_FL0CPRIO(x) \ (((x) >> S_FW_IQ_CMD_FL0CPRIO) & M_FW_IQ_CMD_FL0CPRIO) #define F_FW_IQ_CMD_FL0CPRIO V_FW_IQ_CMD_FL0CPRIO(1U) #define S_FW_IQ_CMD_FL0PADEN 2 #define M_FW_IQ_CMD_FL0PADEN 0x1 #define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN) #define G_FW_IQ_CMD_FL0PADEN(x) \ (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN) #define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U) #define S_FW_IQ_CMD_FL0PACKEN 1 #define M_FW_IQ_CMD_FL0PACKEN 0x1 #define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN) #define G_FW_IQ_CMD_FL0PACKEN(x) \ (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN) #define F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U) #define S_FW_IQ_CMD_FL0CONGEN 0 #define M_FW_IQ_CMD_FL0CONGEN 0x1 #define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN) #define G_FW_IQ_CMD_FL0CONGEN(x) \ (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN) #define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U) #define S_FW_IQ_CMD_FL0DCAEN 15 #define M_FW_IQ_CMD_FL0DCAEN 0x1 #define V_FW_IQ_CMD_FL0DCAEN(x) ((x) << S_FW_IQ_CMD_FL0DCAEN) #define G_FW_IQ_CMD_FL0DCAEN(x) \ (((x) >> S_FW_IQ_CMD_FL0DCAEN) & M_FW_IQ_CMD_FL0DCAEN) #define F_FW_IQ_CMD_FL0DCAEN V_FW_IQ_CMD_FL0DCAEN(1U) #define S_FW_IQ_CMD_FL0DCACPU 10 #define M_FW_IQ_CMD_FL0DCACPU 0x1f #define V_FW_IQ_CMD_FL0DCACPU(x) ((x) << S_FW_IQ_CMD_FL0DCACPU) #define G_FW_IQ_CMD_FL0DCACPU(x) \ (((x) >> S_FW_IQ_CMD_FL0DCACPU) & M_FW_IQ_CMD_FL0DCACPU) #define S_FW_IQ_CMD_FL0FBMIN 7 #define M_FW_IQ_CMD_FL0FBMIN 0x7 #define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN) #define G_FW_IQ_CMD_FL0FBMIN(x) \ (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN) #define S_FW_IQ_CMD_FL0FBMAX 4 #define M_FW_IQ_CMD_FL0FBMAX 0x7 #define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX) #define G_FW_IQ_CMD_FL0FBMAX(x) \ (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX) #define 
S_FW_IQ_CMD_FL0CIDXFTHRESHO 3 #define M_FW_IQ_CMD_FL0CIDXFTHRESHO 0x1 #define V_FW_IQ_CMD_FL0CIDXFTHRESHO(x) ((x) << S_FW_IQ_CMD_FL0CIDXFTHRESHO) #define G_FW_IQ_CMD_FL0CIDXFTHRESHO(x) \ (((x) >> S_FW_IQ_CMD_FL0CIDXFTHRESHO) & M_FW_IQ_CMD_FL0CIDXFTHRESHO) #define F_FW_IQ_CMD_FL0CIDXFTHRESHO V_FW_IQ_CMD_FL0CIDXFTHRESHO(1U) #define S_FW_IQ_CMD_FL0CIDXFTHRESH 0 #define M_FW_IQ_CMD_FL0CIDXFTHRESH 0x7 #define V_FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << S_FW_IQ_CMD_FL0CIDXFTHRESH) #define G_FW_IQ_CMD_FL0CIDXFTHRESH(x) \ (((x) >> S_FW_IQ_CMD_FL0CIDXFTHRESH) & M_FW_IQ_CMD_FL0CIDXFTHRESH) #define S_FW_IQ_CMD_FL1CNGCHMAP 20 #define M_FW_IQ_CMD_FL1CNGCHMAP 0xf #define V_FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL1CNGCHMAP) #define G_FW_IQ_CMD_FL1CNGCHMAP(x) \ (((x) >> S_FW_IQ_CMD_FL1CNGCHMAP) & M_FW_IQ_CMD_FL1CNGCHMAP) #define S_FW_IQ_CMD_FL1CONGDROP 16 #define M_FW_IQ_CMD_FL1CONGDROP 0x1 #define V_FW_IQ_CMD_FL1CONGDROP(x) ((x) << S_FW_IQ_CMD_FL1CONGDROP) #define G_FW_IQ_CMD_FL1CONGDROP(x) \ (((x) >> S_FW_IQ_CMD_FL1CONGDROP) & M_FW_IQ_CMD_FL1CONGDROP) #define F_FW_IQ_CMD_FL1CONGDROP V_FW_IQ_CMD_FL1CONGDROP(1U) #define S_FW_IQ_CMD_FL1CACHELOCK 15 #define M_FW_IQ_CMD_FL1CACHELOCK 0x1 #define V_FW_IQ_CMD_FL1CACHELOCK(x) ((x) << S_FW_IQ_CMD_FL1CACHELOCK) #define G_FW_IQ_CMD_FL1CACHELOCK(x) \ (((x) >> S_FW_IQ_CMD_FL1CACHELOCK) & M_FW_IQ_CMD_FL1CACHELOCK) #define F_FW_IQ_CMD_FL1CACHELOCK V_FW_IQ_CMD_FL1CACHELOCK(1U) #define S_FW_IQ_CMD_FL1DBP 14 #define M_FW_IQ_CMD_FL1DBP 0x1 #define V_FW_IQ_CMD_FL1DBP(x) ((x) << S_FW_IQ_CMD_FL1DBP) #define G_FW_IQ_CMD_FL1DBP(x) \ (((x) >> S_FW_IQ_CMD_FL1DBP) & M_FW_IQ_CMD_FL1DBP) #define F_FW_IQ_CMD_FL1DBP V_FW_IQ_CMD_FL1DBP(1U) #define S_FW_IQ_CMD_FL1DATANS 13 #define M_FW_IQ_CMD_FL1DATANS 0x1 #define V_FW_IQ_CMD_FL1DATANS(x) ((x) << S_FW_IQ_CMD_FL1DATANS) #define G_FW_IQ_CMD_FL1DATANS(x) \ (((x) >> S_FW_IQ_CMD_FL1DATANS) & M_FW_IQ_CMD_FL1DATANS) #define F_FW_IQ_CMD_FL1DATANS V_FW_IQ_CMD_FL1DATANS(1U) #define S_FW_IQ_CMD_FL1DATARO 12 #define 
M_FW_IQ_CMD_FL1DATARO 0x1 #define V_FW_IQ_CMD_FL1DATARO(x) ((x) << S_FW_IQ_CMD_FL1DATARO) #define G_FW_IQ_CMD_FL1DATARO(x) \ (((x) >> S_FW_IQ_CMD_FL1DATARO) & M_FW_IQ_CMD_FL1DATARO) #define F_FW_IQ_CMD_FL1DATARO V_FW_IQ_CMD_FL1DATARO(1U) #define S_FW_IQ_CMD_FL1CONGCIF 11 #define M_FW_IQ_CMD_FL1CONGCIF 0x1 #define V_FW_IQ_CMD_FL1CONGCIF(x) ((x) << S_FW_IQ_CMD_FL1CONGCIF) #define G_FW_IQ_CMD_FL1CONGCIF(x) \ (((x) >> S_FW_IQ_CMD_FL1CONGCIF) & M_FW_IQ_CMD_FL1CONGCIF) #define F_FW_IQ_CMD_FL1CONGCIF V_FW_IQ_CMD_FL1CONGCIF(1U) #define S_FW_IQ_CMD_FL1ONCHIP 10 #define M_FW_IQ_CMD_FL1ONCHIP 0x1 #define V_FW_IQ_CMD_FL1ONCHIP(x) ((x) << S_FW_IQ_CMD_FL1ONCHIP) #define G_FW_IQ_CMD_FL1ONCHIP(x) \ (((x) >> S_FW_IQ_CMD_FL1ONCHIP) & M_FW_IQ_CMD_FL1ONCHIP) #define F_FW_IQ_CMD_FL1ONCHIP V_FW_IQ_CMD_FL1ONCHIP(1U) #define S_FW_IQ_CMD_FL1STATUSPGNS 9 #define M_FW_IQ_CMD_FL1STATUSPGNS 0x1 #define V_FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << S_FW_IQ_CMD_FL1STATUSPGNS) #define G_FW_IQ_CMD_FL1STATUSPGNS(x) \ (((x) >> S_FW_IQ_CMD_FL1STATUSPGNS) & M_FW_IQ_CMD_FL1STATUSPGNS) #define F_FW_IQ_CMD_FL1STATUSPGNS V_FW_IQ_CMD_FL1STATUSPGNS(1U) #define S_FW_IQ_CMD_FL1STATUSPGRO 8 #define M_FW_IQ_CMD_FL1STATUSPGRO 0x1 #define V_FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << S_FW_IQ_CMD_FL1STATUSPGRO) #define G_FW_IQ_CMD_FL1STATUSPGRO(x) \ (((x) >> S_FW_IQ_CMD_FL1STATUSPGRO) & M_FW_IQ_CMD_FL1STATUSPGRO) #define F_FW_IQ_CMD_FL1STATUSPGRO V_FW_IQ_CMD_FL1STATUSPGRO(1U) #define S_FW_IQ_CMD_FL1FETCHNS 7 #define M_FW_IQ_CMD_FL1FETCHNS 0x1 #define V_FW_IQ_CMD_FL1FETCHNS(x) ((x) << S_FW_IQ_CMD_FL1FETCHNS) #define G_FW_IQ_CMD_FL1FETCHNS(x) \ (((x) >> S_FW_IQ_CMD_FL1FETCHNS) & M_FW_IQ_CMD_FL1FETCHNS) #define F_FW_IQ_CMD_FL1FETCHNS V_FW_IQ_CMD_FL1FETCHNS(1U) #define S_FW_IQ_CMD_FL1FETCHRO 6 #define M_FW_IQ_CMD_FL1FETCHRO 0x1 #define V_FW_IQ_CMD_FL1FETCHRO(x) ((x) << S_FW_IQ_CMD_FL1FETCHRO) #define G_FW_IQ_CMD_FL1FETCHRO(x) \ (((x) >> S_FW_IQ_CMD_FL1FETCHRO) & M_FW_IQ_CMD_FL1FETCHRO) #define F_FW_IQ_CMD_FL1FETCHRO 
V_FW_IQ_CMD_FL1FETCHRO(1U) #define S_FW_IQ_CMD_FL1HOSTFCMODE 4 #define M_FW_IQ_CMD_FL1HOSTFCMODE 0x3 #define V_FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL1HOSTFCMODE) #define G_FW_IQ_CMD_FL1HOSTFCMODE(x) \ (((x) >> S_FW_IQ_CMD_FL1HOSTFCMODE) & M_FW_IQ_CMD_FL1HOSTFCMODE) #define S_FW_IQ_CMD_FL1CPRIO 3 #define M_FW_IQ_CMD_FL1CPRIO 0x1 #define V_FW_IQ_CMD_FL1CPRIO(x) ((x) << S_FW_IQ_CMD_FL1CPRIO) #define G_FW_IQ_CMD_FL1CPRIO(x) \ (((x) >> S_FW_IQ_CMD_FL1CPRIO) & M_FW_IQ_CMD_FL1CPRIO) #define F_FW_IQ_CMD_FL1CPRIO V_FW_IQ_CMD_FL1CPRIO(1U) #define S_FW_IQ_CMD_FL1PADEN 2 #define M_FW_IQ_CMD_FL1PADEN 0x1 #define V_FW_IQ_CMD_FL1PADEN(x) ((x) << S_FW_IQ_CMD_FL1PADEN) #define G_FW_IQ_CMD_FL1PADEN(x) \ (((x) >> S_FW_IQ_CMD_FL1PADEN) & M_FW_IQ_CMD_FL1PADEN) #define F_FW_IQ_CMD_FL1PADEN V_FW_IQ_CMD_FL1PADEN(1U) #define S_FW_IQ_CMD_FL1PACKEN 1 #define M_FW_IQ_CMD_FL1PACKEN 0x1 #define V_FW_IQ_CMD_FL1PACKEN(x) ((x) << S_FW_IQ_CMD_FL1PACKEN) #define G_FW_IQ_CMD_FL1PACKEN(x) \ (((x) >> S_FW_IQ_CMD_FL1PACKEN) & M_FW_IQ_CMD_FL1PACKEN) #define F_FW_IQ_CMD_FL1PACKEN V_FW_IQ_CMD_FL1PACKEN(1U) #define S_FW_IQ_CMD_FL1CONGEN 0 #define M_FW_IQ_CMD_FL1CONGEN 0x1 #define V_FW_IQ_CMD_FL1CONGEN(x) ((x) << S_FW_IQ_CMD_FL1CONGEN) #define G_FW_IQ_CMD_FL1CONGEN(x) \ (((x) >> S_FW_IQ_CMD_FL1CONGEN) & M_FW_IQ_CMD_FL1CONGEN) #define F_FW_IQ_CMD_FL1CONGEN V_FW_IQ_CMD_FL1CONGEN(1U) #define S_FW_IQ_CMD_FL1DCAEN 15 #define M_FW_IQ_CMD_FL1DCAEN 0x1 #define V_FW_IQ_CMD_FL1DCAEN(x) ((x) << S_FW_IQ_CMD_FL1DCAEN) #define G_FW_IQ_CMD_FL1DCAEN(x) \ (((x) >> S_FW_IQ_CMD_FL1DCAEN) & M_FW_IQ_CMD_FL1DCAEN) #define F_FW_IQ_CMD_FL1DCAEN V_FW_IQ_CMD_FL1DCAEN(1U) #define S_FW_IQ_CMD_FL1DCACPU 10 #define M_FW_IQ_CMD_FL1DCACPU 0x1f #define V_FW_IQ_CMD_FL1DCACPU(x) ((x) << S_FW_IQ_CMD_FL1DCACPU) #define G_FW_IQ_CMD_FL1DCACPU(x) \ (((x) >> S_FW_IQ_CMD_FL1DCACPU) & M_FW_IQ_CMD_FL1DCACPU) #define S_FW_IQ_CMD_FL1FBMIN 7 #define M_FW_IQ_CMD_FL1FBMIN 0x7 #define V_FW_IQ_CMD_FL1FBMIN(x) ((x) << S_FW_IQ_CMD_FL1FBMIN) #define 
G_FW_IQ_CMD_FL1FBMIN(x) \ (((x) >> S_FW_IQ_CMD_FL1FBMIN) & M_FW_IQ_CMD_FL1FBMIN) #define S_FW_IQ_CMD_FL1FBMAX 4 #define M_FW_IQ_CMD_FL1FBMAX 0x7 #define V_FW_IQ_CMD_FL1FBMAX(x) ((x) << S_FW_IQ_CMD_FL1FBMAX) #define G_FW_IQ_CMD_FL1FBMAX(x) \ (((x) >> S_FW_IQ_CMD_FL1FBMAX) & M_FW_IQ_CMD_FL1FBMAX) #define S_FW_IQ_CMD_FL1CIDXFTHRESHO 3 #define M_FW_IQ_CMD_FL1CIDXFTHRESHO 0x1 #define V_FW_IQ_CMD_FL1CIDXFTHRESHO(x) ((x) << S_FW_IQ_CMD_FL1CIDXFTHRESHO) #define G_FW_IQ_CMD_FL1CIDXFTHRESHO(x) \ (((x) >> S_FW_IQ_CMD_FL1CIDXFTHRESHO) & M_FW_IQ_CMD_FL1CIDXFTHRESHO) #define F_FW_IQ_CMD_FL1CIDXFTHRESHO V_FW_IQ_CMD_FL1CIDXFTHRESHO(1U) #define S_FW_IQ_CMD_FL1CIDXFTHRESH 0 #define M_FW_IQ_CMD_FL1CIDXFTHRESH 0x7 #define V_FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << S_FW_IQ_CMD_FL1CIDXFTHRESH) #define G_FW_IQ_CMD_FL1CIDXFTHRESH(x) \ (((x) >> S_FW_IQ_CMD_FL1CIDXFTHRESH) & M_FW_IQ_CMD_FL1CIDXFTHRESH) struct fw_eq_mngt_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be32 cmpliqid_eqid; __be32 physeqid_pkd; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; }; #define S_FW_EQ_MNGT_CMD_PFN 8 #define M_FW_EQ_MNGT_CMD_PFN 0x7 #define V_FW_EQ_MNGT_CMD_PFN(x) ((x) << S_FW_EQ_MNGT_CMD_PFN) #define G_FW_EQ_MNGT_CMD_PFN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_PFN) & M_FW_EQ_MNGT_CMD_PFN) #define S_FW_EQ_MNGT_CMD_VFN 0 #define M_FW_EQ_MNGT_CMD_VFN 0xff #define V_FW_EQ_MNGT_CMD_VFN(x) ((x) << S_FW_EQ_MNGT_CMD_VFN) #define G_FW_EQ_MNGT_CMD_VFN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_VFN) & M_FW_EQ_MNGT_CMD_VFN) #define S_FW_EQ_MNGT_CMD_ALLOC 31 #define M_FW_EQ_MNGT_CMD_ALLOC 0x1 #define V_FW_EQ_MNGT_CMD_ALLOC(x) ((x) << S_FW_EQ_MNGT_CMD_ALLOC) #define G_FW_EQ_MNGT_CMD_ALLOC(x) \ (((x) >> S_FW_EQ_MNGT_CMD_ALLOC) & M_FW_EQ_MNGT_CMD_ALLOC) #define F_FW_EQ_MNGT_CMD_ALLOC V_FW_EQ_MNGT_CMD_ALLOC(1U) #define S_FW_EQ_MNGT_CMD_FREE 30 #define M_FW_EQ_MNGT_CMD_FREE 0x1 #define V_FW_EQ_MNGT_CMD_FREE(x) ((x) << S_FW_EQ_MNGT_CMD_FREE) #define G_FW_EQ_MNGT_CMD_FREE(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FREE) & 
M_FW_EQ_MNGT_CMD_FREE) #define F_FW_EQ_MNGT_CMD_FREE V_FW_EQ_MNGT_CMD_FREE(1U) #define S_FW_EQ_MNGT_CMD_MODIFY 29 #define M_FW_EQ_MNGT_CMD_MODIFY 0x1 #define V_FW_EQ_MNGT_CMD_MODIFY(x) ((x) << S_FW_EQ_MNGT_CMD_MODIFY) #define G_FW_EQ_MNGT_CMD_MODIFY(x) \ (((x) >> S_FW_EQ_MNGT_CMD_MODIFY) & M_FW_EQ_MNGT_CMD_MODIFY) #define F_FW_EQ_MNGT_CMD_MODIFY V_FW_EQ_MNGT_CMD_MODIFY(1U) #define S_FW_EQ_MNGT_CMD_EQSTART 28 #define M_FW_EQ_MNGT_CMD_EQSTART 0x1 #define V_FW_EQ_MNGT_CMD_EQSTART(x) ((x) << S_FW_EQ_MNGT_CMD_EQSTART) #define G_FW_EQ_MNGT_CMD_EQSTART(x) \ (((x) >> S_FW_EQ_MNGT_CMD_EQSTART) & M_FW_EQ_MNGT_CMD_EQSTART) #define F_FW_EQ_MNGT_CMD_EQSTART V_FW_EQ_MNGT_CMD_EQSTART(1U) #define S_FW_EQ_MNGT_CMD_EQSTOP 27 #define M_FW_EQ_MNGT_CMD_EQSTOP 0x1 #define V_FW_EQ_MNGT_CMD_EQSTOP(x) ((x) << S_FW_EQ_MNGT_CMD_EQSTOP) #define G_FW_EQ_MNGT_CMD_EQSTOP(x) \ (((x) >> S_FW_EQ_MNGT_CMD_EQSTOP) & M_FW_EQ_MNGT_CMD_EQSTOP) #define F_FW_EQ_MNGT_CMD_EQSTOP V_FW_EQ_MNGT_CMD_EQSTOP(1U) #define S_FW_EQ_MNGT_CMD_CMPLIQID 20 #define M_FW_EQ_MNGT_CMD_CMPLIQID 0xfff #define V_FW_EQ_MNGT_CMD_CMPLIQID(x) ((x) << S_FW_EQ_MNGT_CMD_CMPLIQID) #define G_FW_EQ_MNGT_CMD_CMPLIQID(x) \ (((x) >> S_FW_EQ_MNGT_CMD_CMPLIQID) & M_FW_EQ_MNGT_CMD_CMPLIQID) #define S_FW_EQ_MNGT_CMD_EQID 0 #define M_FW_EQ_MNGT_CMD_EQID 0xfffff #define V_FW_EQ_MNGT_CMD_EQID(x) ((x) << S_FW_EQ_MNGT_CMD_EQID) #define G_FW_EQ_MNGT_CMD_EQID(x) \ (((x) >> S_FW_EQ_MNGT_CMD_EQID) & M_FW_EQ_MNGT_CMD_EQID) #define S_FW_EQ_MNGT_CMD_PHYSEQID 0 #define M_FW_EQ_MNGT_CMD_PHYSEQID 0xfffff #define V_FW_EQ_MNGT_CMD_PHYSEQID(x) ((x) << S_FW_EQ_MNGT_CMD_PHYSEQID) #define G_FW_EQ_MNGT_CMD_PHYSEQID(x) \ (((x) >> S_FW_EQ_MNGT_CMD_PHYSEQID) & M_FW_EQ_MNGT_CMD_PHYSEQID) #define S_FW_EQ_MNGT_CMD_FETCHSZM 26 #define M_FW_EQ_MNGT_CMD_FETCHSZM 0x1 #define V_FW_EQ_MNGT_CMD_FETCHSZM(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHSZM) #define G_FW_EQ_MNGT_CMD_FETCHSZM(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FETCHSZM) & M_FW_EQ_MNGT_CMD_FETCHSZM) #define F_FW_EQ_MNGT_CMD_FETCHSZM 
V_FW_EQ_MNGT_CMD_FETCHSZM(1U) #define S_FW_EQ_MNGT_CMD_STATUSPGNS 25 #define M_FW_EQ_MNGT_CMD_STATUSPGNS 0x1 #define V_FW_EQ_MNGT_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_MNGT_CMD_STATUSPGNS) #define G_FW_EQ_MNGT_CMD_STATUSPGNS(x) \ (((x) >> S_FW_EQ_MNGT_CMD_STATUSPGNS) & M_FW_EQ_MNGT_CMD_STATUSPGNS) #define F_FW_EQ_MNGT_CMD_STATUSPGNS V_FW_EQ_MNGT_CMD_STATUSPGNS(1U) #define S_FW_EQ_MNGT_CMD_STATUSPGRO 24 #define M_FW_EQ_MNGT_CMD_STATUSPGRO 0x1 #define V_FW_EQ_MNGT_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_MNGT_CMD_STATUSPGRO) #define G_FW_EQ_MNGT_CMD_STATUSPGRO(x) \ (((x) >> S_FW_EQ_MNGT_CMD_STATUSPGRO) & M_FW_EQ_MNGT_CMD_STATUSPGRO) #define F_FW_EQ_MNGT_CMD_STATUSPGRO V_FW_EQ_MNGT_CMD_STATUSPGRO(1U) #define S_FW_EQ_MNGT_CMD_FETCHNS 23 #define M_FW_EQ_MNGT_CMD_FETCHNS 0x1 #define V_FW_EQ_MNGT_CMD_FETCHNS(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHNS) #define G_FW_EQ_MNGT_CMD_FETCHNS(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FETCHNS) & M_FW_EQ_MNGT_CMD_FETCHNS) #define F_FW_EQ_MNGT_CMD_FETCHNS V_FW_EQ_MNGT_CMD_FETCHNS(1U) #define S_FW_EQ_MNGT_CMD_FETCHRO 22 #define M_FW_EQ_MNGT_CMD_FETCHRO 0x1 #define V_FW_EQ_MNGT_CMD_FETCHRO(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHRO) #define G_FW_EQ_MNGT_CMD_FETCHRO(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FETCHRO) & M_FW_EQ_MNGT_CMD_FETCHRO) #define F_FW_EQ_MNGT_CMD_FETCHRO V_FW_EQ_MNGT_CMD_FETCHRO(1U) #define S_FW_EQ_MNGT_CMD_HOSTFCMODE 20 #define M_FW_EQ_MNGT_CMD_HOSTFCMODE 0x3 #define V_FW_EQ_MNGT_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_MNGT_CMD_HOSTFCMODE) #define G_FW_EQ_MNGT_CMD_HOSTFCMODE(x) \ (((x) >> S_FW_EQ_MNGT_CMD_HOSTFCMODE) & M_FW_EQ_MNGT_CMD_HOSTFCMODE) #define S_FW_EQ_MNGT_CMD_CPRIO 19 #define M_FW_EQ_MNGT_CMD_CPRIO 0x1 #define V_FW_EQ_MNGT_CMD_CPRIO(x) ((x) << S_FW_EQ_MNGT_CMD_CPRIO) #define G_FW_EQ_MNGT_CMD_CPRIO(x) \ (((x) >> S_FW_EQ_MNGT_CMD_CPRIO) & M_FW_EQ_MNGT_CMD_CPRIO) #define F_FW_EQ_MNGT_CMD_CPRIO V_FW_EQ_MNGT_CMD_CPRIO(1U) #define S_FW_EQ_MNGT_CMD_ONCHIP 18 #define M_FW_EQ_MNGT_CMD_ONCHIP 0x1 #define V_FW_EQ_MNGT_CMD_ONCHIP(x) ((x) << S_FW_EQ_MNGT_CMD_ONCHIP) 
#define G_FW_EQ_MNGT_CMD_ONCHIP(x) \ (((x) >> S_FW_EQ_MNGT_CMD_ONCHIP) & M_FW_EQ_MNGT_CMD_ONCHIP) #define F_FW_EQ_MNGT_CMD_ONCHIP V_FW_EQ_MNGT_CMD_ONCHIP(1U) #define S_FW_EQ_MNGT_CMD_PCIECHN 16 #define M_FW_EQ_MNGT_CMD_PCIECHN 0x3 #define V_FW_EQ_MNGT_CMD_PCIECHN(x) ((x) << S_FW_EQ_MNGT_CMD_PCIECHN) #define G_FW_EQ_MNGT_CMD_PCIECHN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_PCIECHN) & M_FW_EQ_MNGT_CMD_PCIECHN) #define S_FW_EQ_MNGT_CMD_IQID 0 #define M_FW_EQ_MNGT_CMD_IQID 0xffff #define V_FW_EQ_MNGT_CMD_IQID(x) ((x) << S_FW_EQ_MNGT_CMD_IQID) #define G_FW_EQ_MNGT_CMD_IQID(x) \ (((x) >> S_FW_EQ_MNGT_CMD_IQID) & M_FW_EQ_MNGT_CMD_IQID) #define S_FW_EQ_MNGT_CMD_DCAEN 31 #define M_FW_EQ_MNGT_CMD_DCAEN 0x1 #define V_FW_EQ_MNGT_CMD_DCAEN(x) ((x) << S_FW_EQ_MNGT_CMD_DCAEN) #define G_FW_EQ_MNGT_CMD_DCAEN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_DCAEN) & M_FW_EQ_MNGT_CMD_DCAEN) #define F_FW_EQ_MNGT_CMD_DCAEN V_FW_EQ_MNGT_CMD_DCAEN(1U) #define S_FW_EQ_MNGT_CMD_DCACPU 26 #define M_FW_EQ_MNGT_CMD_DCACPU 0x1f #define V_FW_EQ_MNGT_CMD_DCACPU(x) ((x) << S_FW_EQ_MNGT_CMD_DCACPU) #define G_FW_EQ_MNGT_CMD_DCACPU(x) \ (((x) >> S_FW_EQ_MNGT_CMD_DCACPU) & M_FW_EQ_MNGT_CMD_DCACPU) #define S_FW_EQ_MNGT_CMD_FBMIN 23 #define M_FW_EQ_MNGT_CMD_FBMIN 0x7 #define V_FW_EQ_MNGT_CMD_FBMIN(x) ((x) << S_FW_EQ_MNGT_CMD_FBMIN) #define G_FW_EQ_MNGT_CMD_FBMIN(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FBMIN) & M_FW_EQ_MNGT_CMD_FBMIN) #define S_FW_EQ_MNGT_CMD_FBMAX 20 #define M_FW_EQ_MNGT_CMD_FBMAX 0x7 #define V_FW_EQ_MNGT_CMD_FBMAX(x) ((x) << S_FW_EQ_MNGT_CMD_FBMAX) #define G_FW_EQ_MNGT_CMD_FBMAX(x) \ (((x) >> S_FW_EQ_MNGT_CMD_FBMAX) & M_FW_EQ_MNGT_CMD_FBMAX) #define S_FW_EQ_MNGT_CMD_CIDXFTHRESHO 19 #define M_FW_EQ_MNGT_CMD_CIDXFTHRESHO 0x1 #define V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x) \ ((x) << S_FW_EQ_MNGT_CMD_CIDXFTHRESHO) #define G_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x) \ (((x) >> S_FW_EQ_MNGT_CMD_CIDXFTHRESHO) & M_FW_EQ_MNGT_CMD_CIDXFTHRESHO) #define F_FW_EQ_MNGT_CMD_CIDXFTHRESHO V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(1U) #define 
S_FW_EQ_MNGT_CMD_CIDXFTHRESH 16 #define M_FW_EQ_MNGT_CMD_CIDXFTHRESH 0x7 #define V_FW_EQ_MNGT_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_MNGT_CMD_CIDXFTHRESH) #define G_FW_EQ_MNGT_CMD_CIDXFTHRESH(x) \ (((x) >> S_FW_EQ_MNGT_CMD_CIDXFTHRESH) & M_FW_EQ_MNGT_CMD_CIDXFTHRESH) #define S_FW_EQ_MNGT_CMD_EQSIZE 0 #define M_FW_EQ_MNGT_CMD_EQSIZE 0xffff #define V_FW_EQ_MNGT_CMD_EQSIZE(x) ((x) << S_FW_EQ_MNGT_CMD_EQSIZE) #define G_FW_EQ_MNGT_CMD_EQSIZE(x) \ (((x) >> S_FW_EQ_MNGT_CMD_EQSIZE) & M_FW_EQ_MNGT_CMD_EQSIZE) struct fw_eq_eth_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be32 eqid_pkd; __be32 physeqid_pkd; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; __be32 autoequiqe_to_viid; __be32 timeren_timerix; __be64 r9; }; #define S_FW_EQ_ETH_CMD_PFN 8 #define M_FW_EQ_ETH_CMD_PFN 0x7 #define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN) #define G_FW_EQ_ETH_CMD_PFN(x) \ (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN) #define S_FW_EQ_ETH_CMD_VFN 0 #define M_FW_EQ_ETH_CMD_VFN 0xff #define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN) #define G_FW_EQ_ETH_CMD_VFN(x) \ (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN) #define S_FW_EQ_ETH_CMD_ALLOC 31 #define M_FW_EQ_ETH_CMD_ALLOC 0x1 #define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC) #define G_FW_EQ_ETH_CMD_ALLOC(x) \ (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC) #define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U) #define S_FW_EQ_ETH_CMD_FREE 30 #define M_FW_EQ_ETH_CMD_FREE 0x1 #define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE) #define G_FW_EQ_ETH_CMD_FREE(x) \ (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE) #define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U) #define S_FW_EQ_ETH_CMD_MODIFY 29 #define M_FW_EQ_ETH_CMD_MODIFY 0x1 #define V_FW_EQ_ETH_CMD_MODIFY(x) ((x) << S_FW_EQ_ETH_CMD_MODIFY) #define G_FW_EQ_ETH_CMD_MODIFY(x) \ (((x) >> S_FW_EQ_ETH_CMD_MODIFY) & M_FW_EQ_ETH_CMD_MODIFY) #define F_FW_EQ_ETH_CMD_MODIFY V_FW_EQ_ETH_CMD_MODIFY(1U) #define 
S_FW_EQ_ETH_CMD_EQSTART 28 #define M_FW_EQ_ETH_CMD_EQSTART 0x1 #define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART) #define G_FW_EQ_ETH_CMD_EQSTART(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART) #define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U) #define S_FW_EQ_ETH_CMD_EQSTOP 27 #define M_FW_EQ_ETH_CMD_EQSTOP 0x1 #define V_FW_EQ_ETH_CMD_EQSTOP(x) ((x) << S_FW_EQ_ETH_CMD_EQSTOP) #define G_FW_EQ_ETH_CMD_EQSTOP(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQSTOP) & M_FW_EQ_ETH_CMD_EQSTOP) #define F_FW_EQ_ETH_CMD_EQSTOP V_FW_EQ_ETH_CMD_EQSTOP(1U) #define S_FW_EQ_ETH_CMD_EQID 0 #define M_FW_EQ_ETH_CMD_EQID 0xfffff #define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID) #define G_FW_EQ_ETH_CMD_EQID(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID) #define S_FW_EQ_ETH_CMD_PHYSEQID 0 #define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff #define V_FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << S_FW_EQ_ETH_CMD_PHYSEQID) #define G_FW_EQ_ETH_CMD_PHYSEQID(x) \ (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID) #define S_FW_EQ_ETH_CMD_FETCHSZM 26 #define M_FW_EQ_ETH_CMD_FETCHSZM 0x1 #define V_FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << S_FW_EQ_ETH_CMD_FETCHSZM) #define G_FW_EQ_ETH_CMD_FETCHSZM(x) \ (((x) >> S_FW_EQ_ETH_CMD_FETCHSZM) & M_FW_EQ_ETH_CMD_FETCHSZM) #define F_FW_EQ_ETH_CMD_FETCHSZM V_FW_EQ_ETH_CMD_FETCHSZM(1U) #define S_FW_EQ_ETH_CMD_STATUSPGNS 25 #define M_FW_EQ_ETH_CMD_STATUSPGNS 0x1 #define V_FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_ETH_CMD_STATUSPGNS) #define G_FW_EQ_ETH_CMD_STATUSPGNS(x) \ (((x) >> S_FW_EQ_ETH_CMD_STATUSPGNS) & M_FW_EQ_ETH_CMD_STATUSPGNS) #define F_FW_EQ_ETH_CMD_STATUSPGNS V_FW_EQ_ETH_CMD_STATUSPGNS(1U) #define S_FW_EQ_ETH_CMD_STATUSPGRO 24 #define M_FW_EQ_ETH_CMD_STATUSPGRO 0x1 #define V_FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_ETH_CMD_STATUSPGRO) #define G_FW_EQ_ETH_CMD_STATUSPGRO(x) \ (((x) >> S_FW_EQ_ETH_CMD_STATUSPGRO) & M_FW_EQ_ETH_CMD_STATUSPGRO) #define F_FW_EQ_ETH_CMD_STATUSPGRO V_FW_EQ_ETH_CMD_STATUSPGRO(1U) 
#define S_FW_EQ_ETH_CMD_FETCHNS 23 #define M_FW_EQ_ETH_CMD_FETCHNS 0x1 #define V_FW_EQ_ETH_CMD_FETCHNS(x) ((x) << S_FW_EQ_ETH_CMD_FETCHNS) #define G_FW_EQ_ETH_CMD_FETCHNS(x) \ (((x) >> S_FW_EQ_ETH_CMD_FETCHNS) & M_FW_EQ_ETH_CMD_FETCHNS) #define F_FW_EQ_ETH_CMD_FETCHNS V_FW_EQ_ETH_CMD_FETCHNS(1U) #define S_FW_EQ_ETH_CMD_FETCHRO 22 #define M_FW_EQ_ETH_CMD_FETCHRO 0x1 #define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO) #define G_FW_EQ_ETH_CMD_FETCHRO(x) \ (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO) #define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U) #define S_FW_EQ_ETH_CMD_HOSTFCMODE 20 #define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3 #define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE) #define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \ (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE) #define S_FW_EQ_ETH_CMD_CPRIO 19 #define M_FW_EQ_ETH_CMD_CPRIO 0x1 #define V_FW_EQ_ETH_CMD_CPRIO(x) ((x) << S_FW_EQ_ETH_CMD_CPRIO) #define G_FW_EQ_ETH_CMD_CPRIO(x) \ (((x) >> S_FW_EQ_ETH_CMD_CPRIO) & M_FW_EQ_ETH_CMD_CPRIO) #define F_FW_EQ_ETH_CMD_CPRIO V_FW_EQ_ETH_CMD_CPRIO(1U) #define S_FW_EQ_ETH_CMD_ONCHIP 18 #define M_FW_EQ_ETH_CMD_ONCHIP 0x1 #define V_FW_EQ_ETH_CMD_ONCHIP(x) ((x) << S_FW_EQ_ETH_CMD_ONCHIP) #define G_FW_EQ_ETH_CMD_ONCHIP(x) \ (((x) >> S_FW_EQ_ETH_CMD_ONCHIP) & M_FW_EQ_ETH_CMD_ONCHIP) #define F_FW_EQ_ETH_CMD_ONCHIP V_FW_EQ_ETH_CMD_ONCHIP(1U) #define S_FW_EQ_ETH_CMD_PCIECHN 16 #define M_FW_EQ_ETH_CMD_PCIECHN 0x3 #define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN) #define G_FW_EQ_ETH_CMD_PCIECHN(x) \ (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN) #define S_FW_EQ_ETH_CMD_IQID 0 #define M_FW_EQ_ETH_CMD_IQID 0xffff #define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID) #define G_FW_EQ_ETH_CMD_IQID(x) \ (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID) #define S_FW_EQ_ETH_CMD_DCAEN 31 #define M_FW_EQ_ETH_CMD_DCAEN 0x1 #define V_FW_EQ_ETH_CMD_DCAEN(x) ((x) << 
S_FW_EQ_ETH_CMD_DCAEN) #define G_FW_EQ_ETH_CMD_DCAEN(x) \ (((x) >> S_FW_EQ_ETH_CMD_DCAEN) & M_FW_EQ_ETH_CMD_DCAEN) #define F_FW_EQ_ETH_CMD_DCAEN V_FW_EQ_ETH_CMD_DCAEN(1U) #define S_FW_EQ_ETH_CMD_DCACPU 26 #define M_FW_EQ_ETH_CMD_DCACPU 0x1f #define V_FW_EQ_ETH_CMD_DCACPU(x) ((x) << S_FW_EQ_ETH_CMD_DCACPU) #define G_FW_EQ_ETH_CMD_DCACPU(x) \ (((x) >> S_FW_EQ_ETH_CMD_DCACPU) & M_FW_EQ_ETH_CMD_DCACPU) #define S_FW_EQ_ETH_CMD_FBMIN 23 #define M_FW_EQ_ETH_CMD_FBMIN 0x7 #define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN) #define G_FW_EQ_ETH_CMD_FBMIN(x) \ (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN) #define S_FW_EQ_ETH_CMD_FBMAX 20 #define M_FW_EQ_ETH_CMD_FBMAX 0x7 #define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX) #define G_FW_EQ_ETH_CMD_FBMAX(x) \ (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX) #define S_FW_EQ_ETH_CMD_CIDXFTHRESHO 19 #define M_FW_EQ_ETH_CMD_CIDXFTHRESHO 0x1 #define V_FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESHO) #define G_FW_EQ_ETH_CMD_CIDXFTHRESHO(x) \ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESHO) & M_FW_EQ_ETH_CMD_CIDXFTHRESHO) #define F_FW_EQ_ETH_CMD_CIDXFTHRESHO V_FW_EQ_ETH_CMD_CIDXFTHRESHO(1U) #define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16 #define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7 #define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH) #define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH) #define S_FW_EQ_ETH_CMD_EQSIZE 0 #define M_FW_EQ_ETH_CMD_EQSIZE 0xffff #define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE) #define G_FW_EQ_ETH_CMD_EQSIZE(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE) #define S_FW_EQ_ETH_CMD_AUTOEQUIQE 31 #define M_FW_EQ_ETH_CMD_AUTOEQUIQE 0x1 #define V_FW_EQ_ETH_CMD_AUTOEQUIQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUIQE) #define G_FW_EQ_ETH_CMD_AUTOEQUIQE(x) \ (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUIQE) & M_FW_EQ_ETH_CMD_AUTOEQUIQE) #define F_FW_EQ_ETH_CMD_AUTOEQUIQE 
V_FW_EQ_ETH_CMD_AUTOEQUIQE(1U) #define S_FW_EQ_ETH_CMD_AUTOEQUEQE 30 #define M_FW_EQ_ETH_CMD_AUTOEQUEQE 0x1 #define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE) #define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x) \ (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE) #define F_FW_EQ_ETH_CMD_AUTOEQUEQE V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U) #define S_FW_EQ_ETH_CMD_VIID 16 #define M_FW_EQ_ETH_CMD_VIID 0xfff #define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID) #define G_FW_EQ_ETH_CMD_VIID(x) \ (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID) #define S_FW_EQ_ETH_CMD_TIMEREN 3 #define M_FW_EQ_ETH_CMD_TIMEREN 0x1 #define V_FW_EQ_ETH_CMD_TIMEREN(x) ((x) << S_FW_EQ_ETH_CMD_TIMEREN) #define G_FW_EQ_ETH_CMD_TIMEREN(x) \ (((x) >> S_FW_EQ_ETH_CMD_TIMEREN) & M_FW_EQ_ETH_CMD_TIMEREN) #define F_FW_EQ_ETH_CMD_TIMEREN V_FW_EQ_ETH_CMD_TIMEREN(1U) #define S_FW_EQ_ETH_CMD_TIMERIX 0 #define M_FW_EQ_ETH_CMD_TIMERIX 0x7 #define V_FW_EQ_ETH_CMD_TIMERIX(x) ((x) << S_FW_EQ_ETH_CMD_TIMERIX) #define G_FW_EQ_ETH_CMD_TIMERIX(x) \ (((x) >> S_FW_EQ_ETH_CMD_TIMERIX) & M_FW_EQ_ETH_CMD_TIMERIX) struct fw_eq_ctrl_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be32 cmpliqid_eqid; __be32 physeqid_pkd; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; }; #define S_FW_EQ_CTRL_CMD_PFN 8 #define M_FW_EQ_CTRL_CMD_PFN 0x7 #define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN) #define G_FW_EQ_CTRL_CMD_PFN(x) \ (((x) >> S_FW_EQ_CTRL_CMD_PFN) & M_FW_EQ_CTRL_CMD_PFN) #define S_FW_EQ_CTRL_CMD_VFN 0 #define M_FW_EQ_CTRL_CMD_VFN 0xff #define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN) #define G_FW_EQ_CTRL_CMD_VFN(x) \ (((x) >> S_FW_EQ_CTRL_CMD_VFN) & M_FW_EQ_CTRL_CMD_VFN) #define S_FW_EQ_CTRL_CMD_ALLOC 31 #define M_FW_EQ_CTRL_CMD_ALLOC 0x1 #define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC) #define G_FW_EQ_CTRL_CMD_ALLOC(x) \ (((x) >> S_FW_EQ_CTRL_CMD_ALLOC) & M_FW_EQ_CTRL_CMD_ALLOC) #define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U) 
/*
 * FW_EQ_CTRL_CMD field accessors (continued).  S_/M_/V_/G_ are
 * shift/mask/insert/extract for each field; F_ is the preshifted flag
 * for single-bit fields.  Values are part of the firmware ABI.
 */
#define S_FW_EQ_CTRL_CMD_FREE 30
#define M_FW_EQ_CTRL_CMD_FREE 0x1
#define V_FW_EQ_CTRL_CMD_FREE(v) ((v) << S_FW_EQ_CTRL_CMD_FREE)
#define G_FW_EQ_CTRL_CMD_FREE(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_FREE) & M_FW_EQ_CTRL_CMD_FREE)
#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U)

#define S_FW_EQ_CTRL_CMD_MODIFY 29
#define M_FW_EQ_CTRL_CMD_MODIFY 0x1
#define V_FW_EQ_CTRL_CMD_MODIFY(v) ((v) << S_FW_EQ_CTRL_CMD_MODIFY)
#define G_FW_EQ_CTRL_CMD_MODIFY(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_MODIFY) & M_FW_EQ_CTRL_CMD_MODIFY)
#define F_FW_EQ_CTRL_CMD_MODIFY V_FW_EQ_CTRL_CMD_MODIFY(1U)

#define S_FW_EQ_CTRL_CMD_EQSTART 28
#define M_FW_EQ_CTRL_CMD_EQSTART 0x1
#define V_FW_EQ_CTRL_CMD_EQSTART(v) ((v) << S_FW_EQ_CTRL_CMD_EQSTART)
#define G_FW_EQ_CTRL_CMD_EQSTART(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_EQSTART) & M_FW_EQ_CTRL_CMD_EQSTART)
#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U)

#define S_FW_EQ_CTRL_CMD_EQSTOP 27
#define M_FW_EQ_CTRL_CMD_EQSTOP 0x1
#define V_FW_EQ_CTRL_CMD_EQSTOP(v) ((v) << S_FW_EQ_CTRL_CMD_EQSTOP)
#define G_FW_EQ_CTRL_CMD_EQSTOP(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_EQSTOP) & M_FW_EQ_CTRL_CMD_EQSTOP)
#define F_FW_EQ_CTRL_CMD_EQSTOP V_FW_EQ_CTRL_CMD_EQSTOP(1U)

#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
#define M_FW_EQ_CTRL_CMD_CMPLIQID 0xfff
#define V_FW_EQ_CTRL_CMD_CMPLIQID(v) ((v) << S_FW_EQ_CTRL_CMD_CMPLIQID)
#define G_FW_EQ_CTRL_CMD_CMPLIQID(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_CMPLIQID) & M_FW_EQ_CTRL_CMD_CMPLIQID)

#define S_FW_EQ_CTRL_CMD_EQID 0
#define M_FW_EQ_CTRL_CMD_EQID 0xfffff
#define V_FW_EQ_CTRL_CMD_EQID(v) ((v) << S_FW_EQ_CTRL_CMD_EQID)
#define G_FW_EQ_CTRL_CMD_EQID(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)

#define S_FW_EQ_CTRL_CMD_PHYSEQID 0
#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff
#define V_FW_EQ_CTRL_CMD_PHYSEQID(v) ((v) << S_FW_EQ_CTRL_CMD_PHYSEQID)
#define G_FW_EQ_CTRL_CMD_PHYSEQID(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)

#define S_FW_EQ_CTRL_CMD_FETCHSZM 26
#define M_FW_EQ_CTRL_CMD_FETCHSZM 0x1
#define V_FW_EQ_CTRL_CMD_FETCHSZM(v) ((v) << S_FW_EQ_CTRL_CMD_FETCHSZM)
#define G_FW_EQ_CTRL_CMD_FETCHSZM(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_FETCHSZM) & M_FW_EQ_CTRL_CMD_FETCHSZM)
#define F_FW_EQ_CTRL_CMD_FETCHSZM V_FW_EQ_CTRL_CMD_FETCHSZM(1U)

#define S_FW_EQ_CTRL_CMD_STATUSPGNS 25
#define M_FW_EQ_CTRL_CMD_STATUSPGNS 0x1
#define V_FW_EQ_CTRL_CMD_STATUSPGNS(v) ((v) << S_FW_EQ_CTRL_CMD_STATUSPGNS)
#define G_FW_EQ_CTRL_CMD_STATUSPGNS(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_STATUSPGNS) & M_FW_EQ_CTRL_CMD_STATUSPGNS)
#define F_FW_EQ_CTRL_CMD_STATUSPGNS V_FW_EQ_CTRL_CMD_STATUSPGNS(1U)

#define S_FW_EQ_CTRL_CMD_STATUSPGRO 24
#define M_FW_EQ_CTRL_CMD_STATUSPGRO 0x1
#define V_FW_EQ_CTRL_CMD_STATUSPGRO(v) ((v) << S_FW_EQ_CTRL_CMD_STATUSPGRO)
#define G_FW_EQ_CTRL_CMD_STATUSPGRO(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_STATUSPGRO) & M_FW_EQ_CTRL_CMD_STATUSPGRO)
#define F_FW_EQ_CTRL_CMD_STATUSPGRO V_FW_EQ_CTRL_CMD_STATUSPGRO(1U)

#define S_FW_EQ_CTRL_CMD_FETCHNS 23
#define M_FW_EQ_CTRL_CMD_FETCHNS 0x1
#define V_FW_EQ_CTRL_CMD_FETCHNS(v) ((v) << S_FW_EQ_CTRL_CMD_FETCHNS)
#define G_FW_EQ_CTRL_CMD_FETCHNS(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_FETCHNS) & M_FW_EQ_CTRL_CMD_FETCHNS)
#define F_FW_EQ_CTRL_CMD_FETCHNS V_FW_EQ_CTRL_CMD_FETCHNS(1U)

#define S_FW_EQ_CTRL_CMD_FETCHRO 22
#define M_FW_EQ_CTRL_CMD_FETCHRO 0x1
#define V_FW_EQ_CTRL_CMD_FETCHRO(v) ((v) << S_FW_EQ_CTRL_CMD_FETCHRO)
#define G_FW_EQ_CTRL_CMD_FETCHRO(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_FETCHRO) & M_FW_EQ_CTRL_CMD_FETCHRO)
#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U)

#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20
#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3
#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(v) ((v) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
#define G_FW_EQ_CTRL_CMD_HOSTFCMODE(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_HOSTFCMODE) & M_FW_EQ_CTRL_CMD_HOSTFCMODE)

#define S_FW_EQ_CTRL_CMD_CPRIO 19
#define M_FW_EQ_CTRL_CMD_CPRIO 0x1
#define V_FW_EQ_CTRL_CMD_CPRIO(v) ((v) << S_FW_EQ_CTRL_CMD_CPRIO)
#define G_FW_EQ_CTRL_CMD_CPRIO(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_CPRIO) & M_FW_EQ_CTRL_CMD_CPRIO)
#define F_FW_EQ_CTRL_CMD_CPRIO V_FW_EQ_CTRL_CMD_CPRIO(1U)

#define S_FW_EQ_CTRL_CMD_ONCHIP 18
#define M_FW_EQ_CTRL_CMD_ONCHIP 0x1
#define V_FW_EQ_CTRL_CMD_ONCHIP(v) ((v) << S_FW_EQ_CTRL_CMD_ONCHIP)
#define G_FW_EQ_CTRL_CMD_ONCHIP(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_ONCHIP) & M_FW_EQ_CTRL_CMD_ONCHIP)
#define F_FW_EQ_CTRL_CMD_ONCHIP V_FW_EQ_CTRL_CMD_ONCHIP(1U)

#define S_FW_EQ_CTRL_CMD_PCIECHN 16
#define M_FW_EQ_CTRL_CMD_PCIECHN 0x3
#define V_FW_EQ_CTRL_CMD_PCIECHN(v) ((v) << S_FW_EQ_CTRL_CMD_PCIECHN)
#define G_FW_EQ_CTRL_CMD_PCIECHN(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_PCIECHN) & M_FW_EQ_CTRL_CMD_PCIECHN)

#define S_FW_EQ_CTRL_CMD_IQID 0
#define M_FW_EQ_CTRL_CMD_IQID 0xffff
#define V_FW_EQ_CTRL_CMD_IQID(v) ((v) << S_FW_EQ_CTRL_CMD_IQID)
#define G_FW_EQ_CTRL_CMD_IQID(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_IQID) & M_FW_EQ_CTRL_CMD_IQID)

#define S_FW_EQ_CTRL_CMD_DCAEN 31
#define M_FW_EQ_CTRL_CMD_DCAEN 0x1
#define V_FW_EQ_CTRL_CMD_DCAEN(v) ((v) << S_FW_EQ_CTRL_CMD_DCAEN)
#define G_FW_EQ_CTRL_CMD_DCAEN(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_DCAEN) & M_FW_EQ_CTRL_CMD_DCAEN)
#define F_FW_EQ_CTRL_CMD_DCAEN V_FW_EQ_CTRL_CMD_DCAEN(1U)

#define S_FW_EQ_CTRL_CMD_DCACPU 26
#define M_FW_EQ_CTRL_CMD_DCACPU 0x1f
#define V_FW_EQ_CTRL_CMD_DCACPU(v) ((v) << S_FW_EQ_CTRL_CMD_DCACPU)
#define G_FW_EQ_CTRL_CMD_DCACPU(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_DCACPU) & M_FW_EQ_CTRL_CMD_DCACPU)

#define S_FW_EQ_CTRL_CMD_FBMIN 23
#define M_FW_EQ_CTRL_CMD_FBMIN 0x7
#define V_FW_EQ_CTRL_CMD_FBMIN(v) ((v) << S_FW_EQ_CTRL_CMD_FBMIN)
#define G_FW_EQ_CTRL_CMD_FBMIN(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_FBMIN) & M_FW_EQ_CTRL_CMD_FBMIN)

#define S_FW_EQ_CTRL_CMD_FBMAX 20
#define M_FW_EQ_CTRL_CMD_FBMAX 0x7
#define V_FW_EQ_CTRL_CMD_FBMAX(v) ((v) << S_FW_EQ_CTRL_CMD_FBMAX)
#define G_FW_EQ_CTRL_CMD_FBMAX(v) \
    (((v) >> S_FW_EQ_CTRL_CMD_FBMAX) & M_FW_EQ_CTRL_CMD_FBMAX)

#define S_FW_EQ_CTRL_CMD_CIDXFTHRESHO 19
#define M_FW_EQ_CTRL_CMD_CIDXFTHRESHO 0x1
#define V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(v) \
    ((v) << S_FW_EQ_CTRL_CMD_CIDXFTHRESHO)
#define G_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) \ (((x) >> S_FW_EQ_CTRL_CMD_CIDXFTHRESHO) & M_FW_EQ_CTRL_CMD_CIDXFTHRESHO) #define F_FW_EQ_CTRL_CMD_CIDXFTHRESHO V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(1U) #define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16 #define M_FW_EQ_CTRL_CMD_CIDXFTHRESH 0x7 #define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH) #define G_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) \ (((x) >> S_FW_EQ_CTRL_CMD_CIDXFTHRESH) & M_FW_EQ_CTRL_CMD_CIDXFTHRESH) #define S_FW_EQ_CTRL_CMD_EQSIZE 0 #define M_FW_EQ_CTRL_CMD_EQSIZE 0xffff #define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE) #define G_FW_EQ_CTRL_CMD_EQSIZE(x) \ (((x) >> S_FW_EQ_CTRL_CMD_EQSIZE) & M_FW_EQ_CTRL_CMD_EQSIZE) struct fw_eq_ofld_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be32 eqid_pkd; __be32 physeqid_pkd; __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; }; #define S_FW_EQ_OFLD_CMD_PFN 8 #define M_FW_EQ_OFLD_CMD_PFN 0x7 #define V_FW_EQ_OFLD_CMD_PFN(x) ((x) << S_FW_EQ_OFLD_CMD_PFN) #define G_FW_EQ_OFLD_CMD_PFN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_PFN) & M_FW_EQ_OFLD_CMD_PFN) #define S_FW_EQ_OFLD_CMD_VFN 0 #define M_FW_EQ_OFLD_CMD_VFN 0xff #define V_FW_EQ_OFLD_CMD_VFN(x) ((x) << S_FW_EQ_OFLD_CMD_VFN) #define G_FW_EQ_OFLD_CMD_VFN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_VFN) & M_FW_EQ_OFLD_CMD_VFN) #define S_FW_EQ_OFLD_CMD_ALLOC 31 #define M_FW_EQ_OFLD_CMD_ALLOC 0x1 #define V_FW_EQ_OFLD_CMD_ALLOC(x) ((x) << S_FW_EQ_OFLD_CMD_ALLOC) #define G_FW_EQ_OFLD_CMD_ALLOC(x) \ (((x) >> S_FW_EQ_OFLD_CMD_ALLOC) & M_FW_EQ_OFLD_CMD_ALLOC) #define F_FW_EQ_OFLD_CMD_ALLOC V_FW_EQ_OFLD_CMD_ALLOC(1U) #define S_FW_EQ_OFLD_CMD_FREE 30 #define M_FW_EQ_OFLD_CMD_FREE 0x1 #define V_FW_EQ_OFLD_CMD_FREE(x) ((x) << S_FW_EQ_OFLD_CMD_FREE) #define G_FW_EQ_OFLD_CMD_FREE(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FREE) & M_FW_EQ_OFLD_CMD_FREE) #define F_FW_EQ_OFLD_CMD_FREE V_FW_EQ_OFLD_CMD_FREE(1U) #define S_FW_EQ_OFLD_CMD_MODIFY 29 #define M_FW_EQ_OFLD_CMD_MODIFY 0x1 #define V_FW_EQ_OFLD_CMD_MODIFY(x) ((x) << 
S_FW_EQ_OFLD_CMD_MODIFY) #define G_FW_EQ_OFLD_CMD_MODIFY(x) \ (((x) >> S_FW_EQ_OFLD_CMD_MODIFY) & M_FW_EQ_OFLD_CMD_MODIFY) #define F_FW_EQ_OFLD_CMD_MODIFY V_FW_EQ_OFLD_CMD_MODIFY(1U) #define S_FW_EQ_OFLD_CMD_EQSTART 28 #define M_FW_EQ_OFLD_CMD_EQSTART 0x1 #define V_FW_EQ_OFLD_CMD_EQSTART(x) ((x) << S_FW_EQ_OFLD_CMD_EQSTART) #define G_FW_EQ_OFLD_CMD_EQSTART(x) \ (((x) >> S_FW_EQ_OFLD_CMD_EQSTART) & M_FW_EQ_OFLD_CMD_EQSTART) #define F_FW_EQ_OFLD_CMD_EQSTART V_FW_EQ_OFLD_CMD_EQSTART(1U) #define S_FW_EQ_OFLD_CMD_EQSTOP 27 #define M_FW_EQ_OFLD_CMD_EQSTOP 0x1 #define V_FW_EQ_OFLD_CMD_EQSTOP(x) ((x) << S_FW_EQ_OFLD_CMD_EQSTOP) #define G_FW_EQ_OFLD_CMD_EQSTOP(x) \ (((x) >> S_FW_EQ_OFLD_CMD_EQSTOP) & M_FW_EQ_OFLD_CMD_EQSTOP) #define F_FW_EQ_OFLD_CMD_EQSTOP V_FW_EQ_OFLD_CMD_EQSTOP(1U) #define S_FW_EQ_OFLD_CMD_EQID 0 #define M_FW_EQ_OFLD_CMD_EQID 0xfffff #define V_FW_EQ_OFLD_CMD_EQID(x) ((x) << S_FW_EQ_OFLD_CMD_EQID) #define G_FW_EQ_OFLD_CMD_EQID(x) \ (((x) >> S_FW_EQ_OFLD_CMD_EQID) & M_FW_EQ_OFLD_CMD_EQID) #define S_FW_EQ_OFLD_CMD_PHYSEQID 0 #define M_FW_EQ_OFLD_CMD_PHYSEQID 0xfffff #define V_FW_EQ_OFLD_CMD_PHYSEQID(x) ((x) << S_FW_EQ_OFLD_CMD_PHYSEQID) #define G_FW_EQ_OFLD_CMD_PHYSEQID(x) \ (((x) >> S_FW_EQ_OFLD_CMD_PHYSEQID) & M_FW_EQ_OFLD_CMD_PHYSEQID) #define S_FW_EQ_OFLD_CMD_FETCHSZM 26 #define M_FW_EQ_OFLD_CMD_FETCHSZM 0x1 #define V_FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHSZM) #define G_FW_EQ_OFLD_CMD_FETCHSZM(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FETCHSZM) & M_FW_EQ_OFLD_CMD_FETCHSZM) #define F_FW_EQ_OFLD_CMD_FETCHSZM V_FW_EQ_OFLD_CMD_FETCHSZM(1U) #define S_FW_EQ_OFLD_CMD_STATUSPGNS 25 #define M_FW_EQ_OFLD_CMD_STATUSPGNS 0x1 #define V_FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_OFLD_CMD_STATUSPGNS) #define G_FW_EQ_OFLD_CMD_STATUSPGNS(x) \ (((x) >> S_FW_EQ_OFLD_CMD_STATUSPGNS) & M_FW_EQ_OFLD_CMD_STATUSPGNS) #define F_FW_EQ_OFLD_CMD_STATUSPGNS V_FW_EQ_OFLD_CMD_STATUSPGNS(1U) #define S_FW_EQ_OFLD_CMD_STATUSPGRO 24 #define M_FW_EQ_OFLD_CMD_STATUSPGRO 0x1 
#define V_FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_OFLD_CMD_STATUSPGRO) #define G_FW_EQ_OFLD_CMD_STATUSPGRO(x) \ (((x) >> S_FW_EQ_OFLD_CMD_STATUSPGRO) & M_FW_EQ_OFLD_CMD_STATUSPGRO) #define F_FW_EQ_OFLD_CMD_STATUSPGRO V_FW_EQ_OFLD_CMD_STATUSPGRO(1U) #define S_FW_EQ_OFLD_CMD_FETCHNS 23 #define M_FW_EQ_OFLD_CMD_FETCHNS 0x1 #define V_FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHNS) #define G_FW_EQ_OFLD_CMD_FETCHNS(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FETCHNS) & M_FW_EQ_OFLD_CMD_FETCHNS) #define F_FW_EQ_OFLD_CMD_FETCHNS V_FW_EQ_OFLD_CMD_FETCHNS(1U) #define S_FW_EQ_OFLD_CMD_FETCHRO 22 #define M_FW_EQ_OFLD_CMD_FETCHRO 0x1 #define V_FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHRO) #define G_FW_EQ_OFLD_CMD_FETCHRO(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FETCHRO) & M_FW_EQ_OFLD_CMD_FETCHRO) #define F_FW_EQ_OFLD_CMD_FETCHRO V_FW_EQ_OFLD_CMD_FETCHRO(1U) #define S_FW_EQ_OFLD_CMD_HOSTFCMODE 20 #define M_FW_EQ_OFLD_CMD_HOSTFCMODE 0x3 #define V_FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_OFLD_CMD_HOSTFCMODE) #define G_FW_EQ_OFLD_CMD_HOSTFCMODE(x) \ (((x) >> S_FW_EQ_OFLD_CMD_HOSTFCMODE) & M_FW_EQ_OFLD_CMD_HOSTFCMODE) #define S_FW_EQ_OFLD_CMD_CPRIO 19 #define M_FW_EQ_OFLD_CMD_CPRIO 0x1 #define V_FW_EQ_OFLD_CMD_CPRIO(x) ((x) << S_FW_EQ_OFLD_CMD_CPRIO) #define G_FW_EQ_OFLD_CMD_CPRIO(x) \ (((x) >> S_FW_EQ_OFLD_CMD_CPRIO) & M_FW_EQ_OFLD_CMD_CPRIO) #define F_FW_EQ_OFLD_CMD_CPRIO V_FW_EQ_OFLD_CMD_CPRIO(1U) #define S_FW_EQ_OFLD_CMD_ONCHIP 18 #define M_FW_EQ_OFLD_CMD_ONCHIP 0x1 #define V_FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << S_FW_EQ_OFLD_CMD_ONCHIP) #define G_FW_EQ_OFLD_CMD_ONCHIP(x) \ (((x) >> S_FW_EQ_OFLD_CMD_ONCHIP) & M_FW_EQ_OFLD_CMD_ONCHIP) #define F_FW_EQ_OFLD_CMD_ONCHIP V_FW_EQ_OFLD_CMD_ONCHIP(1U) #define S_FW_EQ_OFLD_CMD_PCIECHN 16 #define M_FW_EQ_OFLD_CMD_PCIECHN 0x3 #define V_FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << S_FW_EQ_OFLD_CMD_PCIECHN) #define G_FW_EQ_OFLD_CMD_PCIECHN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_PCIECHN) & M_FW_EQ_OFLD_CMD_PCIECHN) #define S_FW_EQ_OFLD_CMD_IQID 0 #define 
M_FW_EQ_OFLD_CMD_IQID 0xffff #define V_FW_EQ_OFLD_CMD_IQID(x) ((x) << S_FW_EQ_OFLD_CMD_IQID) #define G_FW_EQ_OFLD_CMD_IQID(x) \ (((x) >> S_FW_EQ_OFLD_CMD_IQID) & M_FW_EQ_OFLD_CMD_IQID) #define S_FW_EQ_OFLD_CMD_DCAEN 31 #define M_FW_EQ_OFLD_CMD_DCAEN 0x1 #define V_FW_EQ_OFLD_CMD_DCAEN(x) ((x) << S_FW_EQ_OFLD_CMD_DCAEN) #define G_FW_EQ_OFLD_CMD_DCAEN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_DCAEN) & M_FW_EQ_OFLD_CMD_DCAEN) #define F_FW_EQ_OFLD_CMD_DCAEN V_FW_EQ_OFLD_CMD_DCAEN(1U) #define S_FW_EQ_OFLD_CMD_DCACPU 26 #define M_FW_EQ_OFLD_CMD_DCACPU 0x1f #define V_FW_EQ_OFLD_CMD_DCACPU(x) ((x) << S_FW_EQ_OFLD_CMD_DCACPU) #define G_FW_EQ_OFLD_CMD_DCACPU(x) \ (((x) >> S_FW_EQ_OFLD_CMD_DCACPU) & M_FW_EQ_OFLD_CMD_DCACPU) #define S_FW_EQ_OFLD_CMD_FBMIN 23 #define M_FW_EQ_OFLD_CMD_FBMIN 0x7 #define V_FW_EQ_OFLD_CMD_FBMIN(x) ((x) << S_FW_EQ_OFLD_CMD_FBMIN) #define G_FW_EQ_OFLD_CMD_FBMIN(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FBMIN) & M_FW_EQ_OFLD_CMD_FBMIN) #define S_FW_EQ_OFLD_CMD_FBMAX 20 #define M_FW_EQ_OFLD_CMD_FBMAX 0x7 #define V_FW_EQ_OFLD_CMD_FBMAX(x) ((x) << S_FW_EQ_OFLD_CMD_FBMAX) #define G_FW_EQ_OFLD_CMD_FBMAX(x) \ (((x) >> S_FW_EQ_OFLD_CMD_FBMAX) & M_FW_EQ_OFLD_CMD_FBMAX) #define S_FW_EQ_OFLD_CMD_CIDXFTHRESHO 19 #define M_FW_EQ_OFLD_CMD_CIDXFTHRESHO 0x1 #define V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) \ ((x) << S_FW_EQ_OFLD_CMD_CIDXFTHRESHO) #define G_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) \ (((x) >> S_FW_EQ_OFLD_CMD_CIDXFTHRESHO) & M_FW_EQ_OFLD_CMD_CIDXFTHRESHO) #define F_FW_EQ_OFLD_CMD_CIDXFTHRESHO V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(1U) #define S_FW_EQ_OFLD_CMD_CIDXFTHRESH 16 #define M_FW_EQ_OFLD_CMD_CIDXFTHRESH 0x7 #define V_FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_OFLD_CMD_CIDXFTHRESH) #define G_FW_EQ_OFLD_CMD_CIDXFTHRESH(x) \ (((x) >> S_FW_EQ_OFLD_CMD_CIDXFTHRESH) & M_FW_EQ_OFLD_CMD_CIDXFTHRESH) #define S_FW_EQ_OFLD_CMD_EQSIZE 0 #define M_FW_EQ_OFLD_CMD_EQSIZE 0xffff #define V_FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << S_FW_EQ_OFLD_CMD_EQSIZE) #define G_FW_EQ_OFLD_CMD_EQSIZE(x) \ (((x) >> 
S_FW_EQ_OFLD_CMD_EQSIZE) & M_FW_EQ_OFLD_CMD_EQSIZE) /* Following macros present here only to maintain backward * compatibiity. Driver must not use these anymore */ /* Macros for VIID parsing: VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number */ #define S_FW_VIID_PFN 8 #define M_FW_VIID_PFN 0x7 #define V_FW_VIID_PFN(x) ((x) << S_FW_VIID_PFN) #define G_FW_VIID_PFN(x) (((x) >> S_FW_VIID_PFN) & M_FW_VIID_PFN) #define S_FW_VIID_VIVLD 7 #define M_FW_VIID_VIVLD 0x1 #define V_FW_VIID_VIVLD(x) ((x) << S_FW_VIID_VIVLD) #define G_FW_VIID_VIVLD(x) (((x) >> S_FW_VIID_VIVLD) & M_FW_VIID_VIVLD) #define S_FW_VIID_VIN 0 #define M_FW_VIID_VIN 0x7F #define V_FW_VIID_VIN(x) ((x) << S_FW_VIID_VIN) #define G_FW_VIID_VIN(x) (((x) >> S_FW_VIID_VIN) & M_FW_VIID_VIN) /* Macros for VIID parsing: VIID - [11:9] PFN, [8] VI Valid, [7:0] VI number */ #define S_FW_256VIID_PFN 9 #define M_FW_256VIID_PFN 0x7 #define V_FW_256VIID_PFN(x) ((x) << S_FW_256VIID_PFN) #define G_FW_256VIID_PFN(x) (((x) >> S_FW_256VIID_PFN) & M_FW_256VIID_PFN) #define S_FW_256VIID_VIVLD 8 #define M_FW_256VIID_VIVLD 0x1 #define V_FW_256VIID_VIVLD(x) ((x) << S_FW_256VIID_VIVLD) #define G_FW_256VIID_VIVLD(x) (((x) >> S_FW_256VIID_VIVLD) & M_FW_256VIID_VIVLD) #define S_FW_256VIID_VIN 0 #define M_FW_256VIID_VIN 0xFF #define V_FW_256VIID_VIN(x) ((x) << S_FW_256VIID_VIN) #define G_FW_256VIID_VIN(x) (((x) >> S_FW_256VIID_VIN) & M_FW_256VIID_VIN) enum fw_vi_func { FW_VI_FUNC_ETH, FW_VI_FUNC_OFLD, FW_VI_FUNC_IWARP, FW_VI_FUNC_OPENISCSI, FW_VI_FUNC_OPENFCOE, FW_VI_FUNC_FOISCSI, FW_VI_FUNC_FOFCOE, FW_VI_FUNC_FW, }; struct fw_vi_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; __be16 type_to_viid; __u8 mac[6]; __u8 portid_pkd; __u8 nmac; __u8 nmac0[6]; __be16 norss_rsssize; __u8 nmac1[6]; __be16 idsiiq_pkd; __u8 nmac2[6]; __be16 idseiq_pkd; __u8 nmac3[6]; __be64 r9; __be64 r10; }; #define S_FW_VI_CMD_PFN 8 #define M_FW_VI_CMD_PFN 0x7 #define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN) #define G_FW_VI_CMD_PFN(x) \ (((x) >> S_FW_VI_CMD_PFN) 
& M_FW_VI_CMD_PFN) #define S_FW_VI_CMD_VFN 0 #define M_FW_VI_CMD_VFN 0xff #define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN) #define G_FW_VI_CMD_VFN(x) \ (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN) #define S_FW_VI_CMD_ALLOC 31 #define M_FW_VI_CMD_ALLOC 0x1 #define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC) #define G_FW_VI_CMD_ALLOC(x) \ (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC) #define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U) #define S_FW_VI_CMD_FREE 30 #define M_FW_VI_CMD_FREE 0x1 #define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE) #define G_FW_VI_CMD_FREE(x) \ (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE) #define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U) #define S_FW_VI_CMD_VFVLD 24 #define M_FW_VI_CMD_VFVLD 0x1 #define V_FW_VI_CMD_VFVLD(x) ((x) << S_FW_VI_CMD_VFVLD) #define G_FW_VI_CMD_VFVLD(x) \ (((x) >> S_FW_VI_CMD_VFVLD) & M_FW_VI_CMD_VFVLD) #define F_FW_VI_CMD_VFVLD V_FW_VI_CMD_VFVLD(1U) #define S_FW_VI_CMD_VIN 16 #define M_FW_VI_CMD_VIN 0xff #define V_FW_VI_CMD_VIN(x) ((x) << S_FW_VI_CMD_VIN) #define G_FW_VI_CMD_VIN(x) \ (((x) >> S_FW_VI_CMD_VIN) & M_FW_VI_CMD_VIN) #define S_FW_VI_CMD_TYPE 15 #define M_FW_VI_CMD_TYPE 0x1 #define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE) #define G_FW_VI_CMD_TYPE(x) \ (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE) #define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U) #define S_FW_VI_CMD_FUNC 12 #define M_FW_VI_CMD_FUNC 0x7 #define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC) #define G_FW_VI_CMD_FUNC(x) \ (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC) #define S_FW_VI_CMD_VIID 0 #define M_FW_VI_CMD_VIID 0xfff #define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID) #define G_FW_VI_CMD_VIID(x) \ (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID) #define S_FW_VI_CMD_PORTID 4 #define M_FW_VI_CMD_PORTID 0xf #define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID) #define G_FW_VI_CMD_PORTID(x) \ (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID) #define S_FW_VI_CMD_NORSS 11 #define M_FW_VI_CMD_NORSS 0x1 #define 
V_FW_VI_CMD_NORSS(x) ((x) << S_FW_VI_CMD_NORSS) #define G_FW_VI_CMD_NORSS(x) \ (((x) >> S_FW_VI_CMD_NORSS) & M_FW_VI_CMD_NORSS) #define F_FW_VI_CMD_NORSS V_FW_VI_CMD_NORSS(1U) #define S_FW_VI_CMD_RSSSIZE 0 #define M_FW_VI_CMD_RSSSIZE 0x7ff #define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE) #define G_FW_VI_CMD_RSSSIZE(x) \ (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE) #define S_FW_VI_CMD_IDSIIQ 0 #define M_FW_VI_CMD_IDSIIQ 0x3ff #define V_FW_VI_CMD_IDSIIQ(x) ((x) << S_FW_VI_CMD_IDSIIQ) #define G_FW_VI_CMD_IDSIIQ(x) \ (((x) >> S_FW_VI_CMD_IDSIIQ) & M_FW_VI_CMD_IDSIIQ) #define S_FW_VI_CMD_IDSEIQ 0 #define M_FW_VI_CMD_IDSEIQ 0x3ff #define V_FW_VI_CMD_IDSEIQ(x) ((x) << S_FW_VI_CMD_IDSEIQ) #define G_FW_VI_CMD_IDSEIQ(x) \ (((x) >> S_FW_VI_CMD_IDSEIQ) & M_FW_VI_CMD_IDSEIQ) /* Special VI_MAC command index ids */ #define FW_VI_MAC_ADD_MAC 0x3FF #define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE #define FW_VI_MAC_MAC_BASED_FREE 0x3FD #define FW_VI_MAC_ID_BASED_FREE 0x3FC enum fw_vi_mac_smac { FW_VI_MAC_MPS_TCAM_ENTRY, FW_VI_MAC_MPS_TCAM_ONLY, FW_VI_MAC_SMT_ONLY, FW_VI_MAC_SMT_AND_MPSTCAM }; enum fw_vi_mac_result { FW_VI_MAC_R_SUCCESS, FW_VI_MAC_R_F_NONEXISTENT_NOMEM, FW_VI_MAC_R_SMAC_FAIL, FW_VI_MAC_R_F_ACL_CHECK }; enum fw_vi_mac_entry_types { FW_VI_MAC_TYPE_EXACTMAC, FW_VI_MAC_TYPE_HASHVEC, FW_VI_MAC_TYPE_RAW, FW_VI_MAC_TYPE_EXACTMAC_VNI, }; struct fw_vi_mac_cmd { __be32 op_to_viid; __be32 freemacs_to_len16; union fw_vi_mac { struct fw_vi_mac_exact { __be16 valid_to_idx; __u8 macaddr[6]; } exact[7]; struct fw_vi_mac_hash { __be64 hashvec; } hash; struct fw_vi_mac_raw { __be32 raw_idx_pkd; __be32 data0_pkd; __be32 data1[2]; __be64 data0m_pkd; __be32 data1m[2]; } raw; struct fw_vi_mac_vni { __be16 valid_to_idx; __u8 macaddr[6]; __be16 r7; __u8 macaddr_mask[6]; __be32 lookup_type_to_vni; __be32 vni_mask_pkd; } exact_vni[2]; } u; }; #define S_FW_VI_MAC_CMD_SMTID 12 #define M_FW_VI_MAC_CMD_SMTID 0xff #define V_FW_VI_MAC_CMD_SMTID(x) ((x) << S_FW_VI_MAC_CMD_SMTID) #define 
G_FW_VI_MAC_CMD_SMTID(x) \ (((x) >> S_FW_VI_MAC_CMD_SMTID) & M_FW_VI_MAC_CMD_SMTID) #define S_FW_VI_MAC_CMD_VIID 0 #define M_FW_VI_MAC_CMD_VIID 0xfff #define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID) #define G_FW_VI_MAC_CMD_VIID(x) \ (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID) #define S_FW_VI_MAC_CMD_FREEMACS 31 #define M_FW_VI_MAC_CMD_FREEMACS 0x1 #define V_FW_VI_MAC_CMD_FREEMACS(x) ((x) << S_FW_VI_MAC_CMD_FREEMACS) #define G_FW_VI_MAC_CMD_FREEMACS(x) \ (((x) >> S_FW_VI_MAC_CMD_FREEMACS) & M_FW_VI_MAC_CMD_FREEMACS) #define F_FW_VI_MAC_CMD_FREEMACS V_FW_VI_MAC_CMD_FREEMACS(1U) #define S_FW_VI_MAC_CMD_IS_SMAC 30 #define M_FW_VI_MAC_CMD_IS_SMAC 0x1 #define V_FW_VI_MAC_CMD_IS_SMAC(x) ((x) << S_FW_VI_MAC_CMD_IS_SMAC) #define G_FW_VI_MAC_CMD_IS_SMAC(x) \ (((x) >> S_FW_VI_MAC_CMD_IS_SMAC) & M_FW_VI_MAC_CMD_IS_SMAC) #define F_FW_VI_MAC_CMD_IS_SMAC V_FW_VI_MAC_CMD_IS_SMAC(1U) #define S_FW_VI_MAC_CMD_ENTRY_TYPE 23 #define M_FW_VI_MAC_CMD_ENTRY_TYPE 0x7 #define V_FW_VI_MAC_CMD_ENTRY_TYPE(x) ((x) << S_FW_VI_MAC_CMD_ENTRY_TYPE) #define G_FW_VI_MAC_CMD_ENTRY_TYPE(x) \ (((x) >> S_FW_VI_MAC_CMD_ENTRY_TYPE) & M_FW_VI_MAC_CMD_ENTRY_TYPE) #define S_FW_VI_MAC_CMD_HASHUNIEN 22 #define M_FW_VI_MAC_CMD_HASHUNIEN 0x1 #define V_FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << S_FW_VI_MAC_CMD_HASHUNIEN) #define G_FW_VI_MAC_CMD_HASHUNIEN(x) \ (((x) >> S_FW_VI_MAC_CMD_HASHUNIEN) & M_FW_VI_MAC_CMD_HASHUNIEN) #define F_FW_VI_MAC_CMD_HASHUNIEN V_FW_VI_MAC_CMD_HASHUNIEN(1U) #define S_FW_VI_MAC_CMD_VALID 15 #define M_FW_VI_MAC_CMD_VALID 0x1 #define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID) #define G_FW_VI_MAC_CMD_VALID(x) \ (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID) #define F_FW_VI_MAC_CMD_VALID V_FW_VI_MAC_CMD_VALID(1U) #define S_FW_VI_MAC_CMD_PRIO 12 #define M_FW_VI_MAC_CMD_PRIO 0x7 #define V_FW_VI_MAC_CMD_PRIO(x) ((x) << S_FW_VI_MAC_CMD_PRIO) #define G_FW_VI_MAC_CMD_PRIO(x) \ (((x) >> S_FW_VI_MAC_CMD_PRIO) & M_FW_VI_MAC_CMD_PRIO) #define 
S_FW_VI_MAC_CMD_SMAC_RESULT 10 #define M_FW_VI_MAC_CMD_SMAC_RESULT 0x3 #define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT) #define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \ (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT) #define S_FW_VI_MAC_CMD_IDX 0 #define M_FW_VI_MAC_CMD_IDX 0x3ff #define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX) #define G_FW_VI_MAC_CMD_IDX(x) \ (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX) #define S_FW_VI_MAC_CMD_RAW_IDX 16 #define M_FW_VI_MAC_CMD_RAW_IDX 0xffff #define V_FW_VI_MAC_CMD_RAW_IDX(x) ((x) << S_FW_VI_MAC_CMD_RAW_IDX) #define G_FW_VI_MAC_CMD_RAW_IDX(x) \ (((x) >> S_FW_VI_MAC_CMD_RAW_IDX) & M_FW_VI_MAC_CMD_RAW_IDX) #define S_FW_VI_MAC_CMD_DATA0 0 #define M_FW_VI_MAC_CMD_DATA0 0xffff #define V_FW_VI_MAC_CMD_DATA0(x) ((x) << S_FW_VI_MAC_CMD_DATA0) #define G_FW_VI_MAC_CMD_DATA0(x) \ (((x) >> S_FW_VI_MAC_CMD_DATA0) & M_FW_VI_MAC_CMD_DATA0) #define S_FW_VI_MAC_CMD_LOOKUP_TYPE 31 #define M_FW_VI_MAC_CMD_LOOKUP_TYPE 0x1 #define V_FW_VI_MAC_CMD_LOOKUP_TYPE(x) ((x) << S_FW_VI_MAC_CMD_LOOKUP_TYPE) #define G_FW_VI_MAC_CMD_LOOKUP_TYPE(x) \ (((x) >> S_FW_VI_MAC_CMD_LOOKUP_TYPE) & M_FW_VI_MAC_CMD_LOOKUP_TYPE) #define F_FW_VI_MAC_CMD_LOOKUP_TYPE V_FW_VI_MAC_CMD_LOOKUP_TYPE(1U) #define S_FW_VI_MAC_CMD_DIP_HIT 30 #define M_FW_VI_MAC_CMD_DIP_HIT 0x1 #define V_FW_VI_MAC_CMD_DIP_HIT(x) ((x) << S_FW_VI_MAC_CMD_DIP_HIT) #define G_FW_VI_MAC_CMD_DIP_HIT(x) \ (((x) >> S_FW_VI_MAC_CMD_DIP_HIT) & M_FW_VI_MAC_CMD_DIP_HIT) #define F_FW_VI_MAC_CMD_DIP_HIT V_FW_VI_MAC_CMD_DIP_HIT(1U) #define S_FW_VI_MAC_CMD_VNI 0 #define M_FW_VI_MAC_CMD_VNI 0xffffff #define V_FW_VI_MAC_CMD_VNI(x) ((x) << S_FW_VI_MAC_CMD_VNI) #define G_FW_VI_MAC_CMD_VNI(x) \ (((x) >> S_FW_VI_MAC_CMD_VNI) & M_FW_VI_MAC_CMD_VNI) /* Extracting loopback port number passed from driver. * as a part of fw_vi_mac_vni For non loopback entries * ignore the field and update port number from flowc. * Fw will ignore if physical port number received. 
* expected range (4-7). */ #define S_FW_VI_MAC_CMD_PORT 24 #define M_FW_VI_MAC_CMD_PORT 0x7 #define V_FW_VI_MAC_CMD_PORT(x) ((x) << S_FW_VI_MAC_CMD_PORT) #define G_FW_VI_MAC_CMD_PORT(x) \ (((x) >> S_FW_VI_MAC_CMD_PORT) & M_FW_VI_MAC_CMD_PORT) #define S_FW_VI_MAC_CMD_VNI_MASK 0 #define M_FW_VI_MAC_CMD_VNI_MASK 0xffffff #define V_FW_VI_MAC_CMD_VNI_MASK(x) ((x) << S_FW_VI_MAC_CMD_VNI_MASK) #define G_FW_VI_MAC_CMD_VNI_MASK(x) \ (((x) >> S_FW_VI_MAC_CMD_VNI_MASK) & M_FW_VI_MAC_CMD_VNI_MASK) /* T4 max MTU supported */ #define T4_MAX_MTU_SUPPORTED 9600 #define FW_RXMODE_MTU_NO_CHG 65535 struct fw_vi_rxmode_cmd { __be32 op_to_viid; __be32 retval_len16; __be32 mtu_to_vlanexen; __be32 r4_lo; }; #define S_FW_VI_RXMODE_CMD_VIID 0 #define M_FW_VI_RXMODE_CMD_VIID 0xfff #define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID) #define G_FW_VI_RXMODE_CMD_VIID(x) \ (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID) #define S_FW_VI_RXMODE_CMD_MTU 16 #define M_FW_VI_RXMODE_CMD_MTU 0xffff #define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU) #define G_FW_VI_RXMODE_CMD_MTU(x) \ (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU) #define S_FW_VI_RXMODE_CMD_PROMISCEN 14 #define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3 #define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN) #define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \ (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN) #define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12 #define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3 #define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN) #define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN) #define S_FW_VI_RXMODE_CMD_BROADCASTEN 10 #define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3 #define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN) #define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & 
M_FW_VI_RXMODE_CMD_BROADCASTEN) #define S_FW_VI_RXMODE_CMD_VLANEXEN 8 #define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3 #define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN) #define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \ (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN) struct fw_vi_enable_cmd { __be32 op_to_viid; __be32 ien_to_len16; __be16 blinkdur; __be16 r3; __be32 r4; }; #define S_FW_VI_ENABLE_CMD_VIID 0 #define M_FW_VI_ENABLE_CMD_VIID 0xfff #define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID) #define G_FW_VI_ENABLE_CMD_VIID(x) \ (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID) #define S_FW_VI_ENABLE_CMD_IEN 31 #define M_FW_VI_ENABLE_CMD_IEN 0x1 #define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN) #define G_FW_VI_ENABLE_CMD_IEN(x) \ (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN) #define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U) #define S_FW_VI_ENABLE_CMD_EEN 30 #define M_FW_VI_ENABLE_CMD_EEN 0x1 #define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN) #define G_FW_VI_ENABLE_CMD_EEN(x) \ (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN) #define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U) #define S_FW_VI_ENABLE_CMD_LED 29 #define M_FW_VI_ENABLE_CMD_LED 0x1 #define V_FW_VI_ENABLE_CMD_LED(x) ((x) << S_FW_VI_ENABLE_CMD_LED) #define G_FW_VI_ENABLE_CMD_LED(x) \ (((x) >> S_FW_VI_ENABLE_CMD_LED) & M_FW_VI_ENABLE_CMD_LED) #define F_FW_VI_ENABLE_CMD_LED V_FW_VI_ENABLE_CMD_LED(1U) #define S_FW_VI_ENABLE_CMD_DCB_INFO 28 #define M_FW_VI_ENABLE_CMD_DCB_INFO 0x1 #define V_FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << S_FW_VI_ENABLE_CMD_DCB_INFO) #define G_FW_VI_ENABLE_CMD_DCB_INFO(x) \ (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO) #define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U) /* VI VF stats offset definitions */ #define VI_VF_NUM_STATS 16 enum fw_vi_stats_vf_index { FW_VI_VF_STAT_TX_BCAST_BYTES_IX, FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, 
FW_VI_VF_STAT_TX_MCAST_BYTES_IX, FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, FW_VI_VF_STAT_TX_UCAST_BYTES_IX, FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, FW_VI_VF_STAT_TX_DROP_FRAMES_IX, FW_VI_VF_STAT_TX_OFLD_BYTES_IX, FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, FW_VI_VF_STAT_RX_BCAST_BYTES_IX, FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, FW_VI_VF_STAT_RX_MCAST_BYTES_IX, FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, FW_VI_VF_STAT_RX_UCAST_BYTES_IX, FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, FW_VI_VF_STAT_RX_ERR_FRAMES_IX }; /* VI PF stats offset definitions */ #define VI_PF_NUM_STATS 17 enum fw_vi_stats_pf_index { FW_VI_PF_STAT_TX_BCAST_BYTES_IX, FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, FW_VI_PF_STAT_TX_MCAST_BYTES_IX, FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, FW_VI_PF_STAT_TX_UCAST_BYTES_IX, FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, FW_VI_PF_STAT_TX_OFLD_BYTES_IX, FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, FW_VI_PF_STAT_RX_BYTES_IX, FW_VI_PF_STAT_RX_FRAMES_IX, FW_VI_PF_STAT_RX_BCAST_BYTES_IX, FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, FW_VI_PF_STAT_RX_MCAST_BYTES_IX, FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, FW_VI_PF_STAT_RX_UCAST_BYTES_IX, FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, FW_VI_PF_STAT_RX_ERR_FRAMES_IX }; struct fw_vi_stats_cmd { __be32 op_to_viid; __be32 retval_len16; union fw_vi_stats { struct fw_vi_stats_ctl { __be16 nstats_ix; __be16 r6; __be32 r7; __be64 stat0; __be64 stat1; __be64 stat2; __be64 stat3; __be64 stat4; __be64 stat5; } ctl; struct fw_vi_stats_pf { __be64 tx_bcast_bytes; __be64 tx_bcast_frames; __be64 tx_mcast_bytes; __be64 tx_mcast_frames; __be64 tx_ucast_bytes; __be64 tx_ucast_frames; __be64 tx_offload_bytes; __be64 tx_offload_frames; __be64 rx_pf_bytes; __be64 rx_pf_frames; __be64 rx_bcast_bytes; __be64 rx_bcast_frames; __be64 rx_mcast_bytes; __be64 rx_mcast_frames; __be64 rx_ucast_bytes; __be64 rx_ucast_frames; __be64 rx_err_frames; } pf; struct fw_vi_stats_vf { __be64 tx_bcast_bytes; __be64 tx_bcast_frames; __be64 tx_mcast_bytes; __be64 tx_mcast_frames; __be64 tx_ucast_bytes; __be64 tx_ucast_frames; __be64 tx_drop_frames; __be64 tx_offload_bytes; 
__be64 tx_offload_frames; __be64 rx_bcast_bytes; __be64 rx_bcast_frames; __be64 rx_mcast_bytes; __be64 rx_mcast_frames; __be64 rx_ucast_bytes; __be64 rx_ucast_frames; __be64 rx_err_frames; } vf; } u; }; #define S_FW_VI_STATS_CMD_VIID 0 #define M_FW_VI_STATS_CMD_VIID 0xfff #define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID) #define G_FW_VI_STATS_CMD_VIID(x) \ (((x) >> S_FW_VI_STATS_CMD_VIID) & M_FW_VI_STATS_CMD_VIID) #define S_FW_VI_STATS_CMD_NSTATS 12 #define M_FW_VI_STATS_CMD_NSTATS 0x7 #define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS) #define G_FW_VI_STATS_CMD_NSTATS(x) \ (((x) >> S_FW_VI_STATS_CMD_NSTATS) & M_FW_VI_STATS_CMD_NSTATS) #define S_FW_VI_STATS_CMD_IX 0 #define M_FW_VI_STATS_CMD_IX 0x1f #define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX) #define G_FW_VI_STATS_CMD_IX(x) \ (((x) >> S_FW_VI_STATS_CMD_IX) & M_FW_VI_STATS_CMD_IX) struct fw_acl_mac_cmd { __be32 op_to_vfn; __be32 en_to_len16; __u8 nmac; __u8 r3[7]; __be16 r4; __u8 macaddr0[6]; __be16 r5; __u8 macaddr1[6]; __be16 r6; __u8 macaddr2[6]; __be16 r7; __u8 macaddr3[6]; }; #define S_FW_ACL_MAC_CMD_PFN 8 #define M_FW_ACL_MAC_CMD_PFN 0x7 #define V_FW_ACL_MAC_CMD_PFN(x) ((x) << S_FW_ACL_MAC_CMD_PFN) #define G_FW_ACL_MAC_CMD_PFN(x) \ (((x) >> S_FW_ACL_MAC_CMD_PFN) & M_FW_ACL_MAC_CMD_PFN) #define S_FW_ACL_MAC_CMD_VFN 0 #define M_FW_ACL_MAC_CMD_VFN 0xff #define V_FW_ACL_MAC_CMD_VFN(x) ((x) << S_FW_ACL_MAC_CMD_VFN) #define G_FW_ACL_MAC_CMD_VFN(x) \ (((x) >> S_FW_ACL_MAC_CMD_VFN) & M_FW_ACL_MAC_CMD_VFN) #define S_FW_ACL_MAC_CMD_EN 31 #define M_FW_ACL_MAC_CMD_EN 0x1 #define V_FW_ACL_MAC_CMD_EN(x) ((x) << S_FW_ACL_MAC_CMD_EN) #define G_FW_ACL_MAC_CMD_EN(x) \ (((x) >> S_FW_ACL_MAC_CMD_EN) & M_FW_ACL_MAC_CMD_EN) #define F_FW_ACL_MAC_CMD_EN V_FW_ACL_MAC_CMD_EN(1U) struct fw_acl_vlan_cmd { __be32 op_to_vfn; __be32 en_to_len16; __u8 nvlan; __u8 dropnovlan_fm; __u8 r3_lo[6]; __be16 vlanid[16]; }; #define S_FW_ACL_VLAN_CMD_PFN 8 #define M_FW_ACL_VLAN_CMD_PFN 0x7 #define 
V_FW_ACL_VLAN_CMD_PFN(x) ((x) << S_FW_ACL_VLAN_CMD_PFN) #define G_FW_ACL_VLAN_CMD_PFN(x) \ (((x) >> S_FW_ACL_VLAN_CMD_PFN) & M_FW_ACL_VLAN_CMD_PFN) #define S_FW_ACL_VLAN_CMD_VFN 0 #define M_FW_ACL_VLAN_CMD_VFN 0xff #define V_FW_ACL_VLAN_CMD_VFN(x) ((x) << S_FW_ACL_VLAN_CMD_VFN) #define G_FW_ACL_VLAN_CMD_VFN(x) \ (((x) >> S_FW_ACL_VLAN_CMD_VFN) & M_FW_ACL_VLAN_CMD_VFN) #define S_FW_ACL_VLAN_CMD_EN 31 #define M_FW_ACL_VLAN_CMD_EN 0x1 #define V_FW_ACL_VLAN_CMD_EN(x) ((x) << S_FW_ACL_VLAN_CMD_EN) #define G_FW_ACL_VLAN_CMD_EN(x) \ (((x) >> S_FW_ACL_VLAN_CMD_EN) & M_FW_ACL_VLAN_CMD_EN) #define F_FW_ACL_VLAN_CMD_EN V_FW_ACL_VLAN_CMD_EN(1U) #define S_FW_ACL_VLAN_CMD_TRANSPARENT 30 #define M_FW_ACL_VLAN_CMD_TRANSPARENT 0x1 #define V_FW_ACL_VLAN_CMD_TRANSPARENT(x) \ ((x) << S_FW_ACL_VLAN_CMD_TRANSPARENT) #define G_FW_ACL_VLAN_CMD_TRANSPARENT(x) \ (((x) >> S_FW_ACL_VLAN_CMD_TRANSPARENT) & M_FW_ACL_VLAN_CMD_TRANSPARENT) #define F_FW_ACL_VLAN_CMD_TRANSPARENT V_FW_ACL_VLAN_CMD_TRANSPARENT(1U) #define S_FW_ACL_VLAN_CMD_PMASK 16 #define M_FW_ACL_VLAN_CMD_PMASK 0xf #define V_FW_ACL_VLAN_CMD_PMASK(x) ((x) << S_FW_ACL_VLAN_CMD_PMASK) #define G_FW_ACL_VLAN_CMD_PMASK(x) \ (((x) >> S_FW_ACL_VLAN_CMD_PMASK) & M_FW_ACL_VLAN_CMD_PMASK) #define S_FW_ACL_VLAN_CMD_DROPNOVLAN 7 #define M_FW_ACL_VLAN_CMD_DROPNOVLAN 0x1 #define V_FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << S_FW_ACL_VLAN_CMD_DROPNOVLAN) #define G_FW_ACL_VLAN_CMD_DROPNOVLAN(x) \ (((x) >> S_FW_ACL_VLAN_CMD_DROPNOVLAN) & M_FW_ACL_VLAN_CMD_DROPNOVLAN) #define F_FW_ACL_VLAN_CMD_DROPNOVLAN V_FW_ACL_VLAN_CMD_DROPNOVLAN(1U) #define S_FW_ACL_VLAN_CMD_FM 6 #define M_FW_ACL_VLAN_CMD_FM 0x1 #define V_FW_ACL_VLAN_CMD_FM(x) ((x) << S_FW_ACL_VLAN_CMD_FM) #define G_FW_ACL_VLAN_CMD_FM(x) \ (((x) >> S_FW_ACL_VLAN_CMD_FM) & M_FW_ACL_VLAN_CMD_FM) #define F_FW_ACL_VLAN_CMD_FM V_FW_ACL_VLAN_CMD_FM(1U) /* old 16-bit port capabilities bitmap (fw_port_cap16_t) */ enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, FW_PORT_CAP_SPEED_1G = 0x0002, 
FW_PORT_CAP_SPEED_25G = 0x0004, FW_PORT_CAP_SPEED_10G = 0x0008, FW_PORT_CAP_SPEED_40G = 0x0010, FW_PORT_CAP_SPEED_100G = 0x0020, FW_PORT_CAP_FC_RX = 0x0040, FW_PORT_CAP_FC_TX = 0x0080, FW_PORT_CAP_ANEG = 0x0100, FW_PORT_CAP_MDIAUTO = 0x0200, FW_PORT_CAP_MDISTRAIGHT = 0x0400, FW_PORT_CAP_FEC_RS = 0x0800, FW_PORT_CAP_FEC_BASER_RS = 0x1000, FW_PORT_CAP_FORCE_PAUSE = 0x2000, FW_PORT_CAP_802_3_PAUSE = 0x4000, FW_PORT_CAP_802_3_ASM_DIR = 0x8000, }; #define S_FW_PORT_CAP_SPEED 0 #define M_FW_PORT_CAP_SPEED 0x3f #define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED) #define G_FW_PORT_CAP_SPEED(x) \ (((x) >> S_FW_PORT_CAP_SPEED) & M_FW_PORT_CAP_SPEED) #define S_FW_PORT_CAP_FC 6 #define M_FW_PORT_CAP_FC 0x3 #define V_FW_PORT_CAP_FC(x) ((x) << S_FW_PORT_CAP_FC) #define G_FW_PORT_CAP_FC(x) \ (((x) >> S_FW_PORT_CAP_FC) & M_FW_PORT_CAP_FC) #define S_FW_PORT_CAP_ANEG 8 #define M_FW_PORT_CAP_ANEG 0x1 #define V_FW_PORT_CAP_ANEG(x) ((x) << S_FW_PORT_CAP_ANEG) #define G_FW_PORT_CAP_ANEG(x) \ (((x) >> S_FW_PORT_CAP_ANEG) & M_FW_PORT_CAP_ANEG) #define S_FW_PORT_CAP_FEC 11 #define M_FW_PORT_CAP_FEC 0x3 #define V_FW_PORT_CAP_FEC(x) ((x) << S_FW_PORT_CAP_FEC) #define G_FW_PORT_CAP_FEC(x) \ (((x) >> S_FW_PORT_CAP_FEC) & M_FW_PORT_CAP_FEC) #define S_FW_PORT_CAP_FORCE_PAUSE 13 #define M_FW_PORT_CAP_FORCE_PAUSE 0x1 #define V_FW_PORT_CAP_FORCE_PAUSE(x) ((x) << S_FW_PORT_CAP_FORCE_PAUSE) #define G_FW_PORT_CAP_FORCE_PAUSE(x) \ (((x) >> S_FW_PORT_CAP_FORCE_PAUSE) & M_FW_PORT_CAP_FORCE_PAUSE) #define S_FW_PORT_CAP_802_3 14 #define M_FW_PORT_CAP_802_3 0x3 #define V_FW_PORT_CAP_802_3(x) ((x) << S_FW_PORT_CAP_802_3) #define G_FW_PORT_CAP_802_3(x) \ (((x) >> S_FW_PORT_CAP_802_3) & M_FW_PORT_CAP_802_3) enum fw_port_mdi { FW_PORT_CAP_MDI_UNCHANGED, FW_PORT_CAP_MDI_AUTO, FW_PORT_CAP_MDI_F_STRAIGHT, FW_PORT_CAP_MDI_F_CROSSOVER }; #define S_FW_PORT_CAP_MDI 9 #define M_FW_PORT_CAP_MDI 3 #define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI) #define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & 
M_FW_PORT_CAP_MDI) /* new 32-bit port capabilities bitmap (fw_port_cap32_t) */ #define FW_PORT_CAP32_SPEED_100M 0x00000001UL #define FW_PORT_CAP32_SPEED_1G 0x00000002UL #define FW_PORT_CAP32_SPEED_10G 0x00000004UL #define FW_PORT_CAP32_SPEED_25G 0x00000008UL #define FW_PORT_CAP32_SPEED_40G 0x00000010UL #define FW_PORT_CAP32_SPEED_50G 0x00000020UL #define FW_PORT_CAP32_SPEED_100G 0x00000040UL #define FW_PORT_CAP32_SPEED_200G 0x00000080UL #define FW_PORT_CAP32_SPEED_400G 0x00000100UL #define FW_PORT_CAP32_SPEED_RESERVED1 0x00000200UL #define FW_PORT_CAP32_SPEED_RESERVED2 0x00000400UL #define FW_PORT_CAP32_SPEED_RESERVED3 0x00000800UL #define FW_PORT_CAP32_RESERVED1 0x0000f000UL #define FW_PORT_CAP32_FC_RX 0x00010000UL #define FW_PORT_CAP32_FC_TX 0x00020000UL #define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL #define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL #define FW_PORT_CAP32_ANEG 0x00100000UL #define FW_PORT_CAP32_MDIAUTO 0x00200000UL #define FW_PORT_CAP32_MDISTRAIGHT 0x00400000UL #define FW_PORT_CAP32_FEC_RS 0x00800000UL #define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL #define FW_PORT_CAP32_FEC_NO_FEC 0x02000000UL #define FW_PORT_CAP32_FEC_RESERVED2 0x04000000UL #define FW_PORT_CAP32_FEC_RESERVED3 0x08000000UL #define FW_PORT_CAP32_FORCE_PAUSE 0x10000000UL #define FW_PORT_CAP32_FORCE_FEC 0x20000000UL #define FW_PORT_CAP32_RESERVED2 0xc0000000UL #define S_FW_PORT_CAP32_SPEED 0 #define M_FW_PORT_CAP32_SPEED 0xfff #define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED) #define G_FW_PORT_CAP32_SPEED(x) \ (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED) #define S_FW_PORT_CAP32_FC 16 #define M_FW_PORT_CAP32_FC 0x3 #define V_FW_PORT_CAP32_FC(x) ((x) << S_FW_PORT_CAP32_FC) #define G_FW_PORT_CAP32_FC(x) \ (((x) >> S_FW_PORT_CAP32_FC) & M_FW_PORT_CAP32_FC) #define S_FW_PORT_CAP32_802_3 18 #define M_FW_PORT_CAP32_802_3 0x3 #define V_FW_PORT_CAP32_802_3(x) ((x) << S_FW_PORT_CAP32_802_3) #define G_FW_PORT_CAP32_802_3(x) \ (((x) >> S_FW_PORT_CAP32_802_3) & 
M_FW_PORT_CAP32_802_3) #define S_FW_PORT_CAP32_ANEG 20 #define M_FW_PORT_CAP32_ANEG 0x1 #define V_FW_PORT_CAP32_ANEG(x) ((x) << S_FW_PORT_CAP32_ANEG) #define G_FW_PORT_CAP32_ANEG(x) \ (((x) >> S_FW_PORT_CAP32_ANEG) & M_FW_PORT_CAP32_ANEG) #define S_FW_PORT_CAP32_FORCE_PAUSE 28 #define M_FW_PORT_CAP32_FORCE_PAUSE 0x1 #define V_FW_PORT_CAP32_FORCE_PAUSE(x) ((x) << S_FW_PORT_CAP32_FORCE_PAUSE) #define G_FW_PORT_CAP32_FORCE_PAUSE(x) \ (((x) >> S_FW_PORT_CAP32_FORCE_PAUSE) & M_FW_PORT_CAP32_FORCE_PAUSE) enum fw_port_mdi32 { FW_PORT_CAP32_MDI_UNCHANGED, FW_PORT_CAP32_MDI_AUTO, FW_PORT_CAP32_MDI_F_STRAIGHT, FW_PORT_CAP32_MDI_F_CROSSOVER }; #define S_FW_PORT_CAP32_MDI 21 #define M_FW_PORT_CAP32_MDI 3 #define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI) #define G_FW_PORT_CAP32_MDI(x) \ (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI) #define S_FW_PORT_CAP32_FEC 23 #define M_FW_PORT_CAP32_FEC 0x1f #define V_FW_PORT_CAP32_FEC(x) ((x) << S_FW_PORT_CAP32_FEC) #define G_FW_PORT_CAP32_FEC(x) \ (((x) >> S_FW_PORT_CAP32_FEC) & M_FW_PORT_CAP32_FEC) /* macros to isolate various 32-bit Port Capabilities sub-fields */ #define CAP32_SPEED(__cap32) \ (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) & __cap32) #define CAP32_FEC(__cap32) \ (V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC) & __cap32) #define CAP32_FC(__cap32) \ (V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC) & __cap32) static inline bool fec_supported(uint32_t caps) { return ((caps & (FW_PORT_CAP32_SPEED_25G | FW_PORT_CAP32_SPEED_50G | FW_PORT_CAP32_SPEED_100G)) != 0); } enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, FW_PORT_ACTION_L2_CFG = 0x0002, FW_PORT_ACTION_GET_PORT_INFO = 0x0003, FW_PORT_ACTION_L2_PPP_CFG = 0x0004, FW_PORT_ACTION_L2_DCB_CFG = 0x0005, FW_PORT_ACTION_DCB_READ_TRANS = 0x0006, FW_PORT_ACTION_DCB_READ_RECV = 0x0007, FW_PORT_ACTION_DCB_READ_DET = 0x0008, FW_PORT_ACTION_L1_CFG32 = 0x0009, FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a, FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, FW_PORT_ACTION_L1_LOW_PWR_EN = 
0x0011, FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, FW_PORT_ACTION_LPBK_SS_ASIC = 0x0022, FW_PORT_ACTION_LPBK_WS_ASIC = 0x0023, FW_PORT_ACTION_LPBK_WS_EXT_PHY = 0x0025, FW_PORT_ACTION_LPBK_SS_EXT = 0x0026, FW_PORT_ACTION_DIAGNOSTICS = 0x0027, FW_PORT_ACTION_LPBK_SS_EXT_PHY = 0x0028, FW_PORT_ACTION_PHY_RESET = 0x0040, FW_PORT_ACTION_PMA_RESET = 0x0041, FW_PORT_ACTION_PCS_RESET = 0x0042, FW_PORT_ACTION_PHYXS_RESET = 0x0043, FW_PORT_ACTION_DTEXS_REEST = 0x0044, FW_PORT_ACTION_AN_RESET = 0x0045, }; enum fw_port_l2cfg_ctlbf { FW_PORT_L2_CTLBF_OVLAN0 = 0x01, FW_PORT_L2_CTLBF_OVLAN1 = 0x02, FW_PORT_L2_CTLBF_OVLAN2 = 0x04, FW_PORT_L2_CTLBF_OVLAN3 = 0x08, FW_PORT_L2_CTLBF_IVLAN = 0x10, FW_PORT_L2_CTLBF_TXIPG = 0x20, FW_PORT_L2_CTLBF_MTU = 0x40, FW_PORT_L2_CTLBF_OVLAN_FILT = 0x80, }; enum fw_dcb_app_tlv_sf { FW_DCB_APP_SF_ETHERTYPE, FW_DCB_APP_SF_SOCKET_TCP, FW_DCB_APP_SF_SOCKET_UDP, FW_DCB_APP_SF_SOCKET_ALL, }; enum fw_port_dcb_versions { FW_PORT_DCB_VER_UNKNOWN, FW_PORT_DCB_VER_CEE1D0, FW_PORT_DCB_VER_CEE1D01, FW_PORT_DCB_VER_IEEE, FW_PORT_DCB_VER_AUTO=7 }; enum fw_port_dcb_cfg { FW_PORT_DCB_CFG_PG = 0x01, FW_PORT_DCB_CFG_PFC = 0x02, FW_PORT_DCB_CFG_APPL = 0x04 }; enum fw_port_dcb_cfg_rc { FW_PORT_DCB_CFG_SUCCESS = 0x0, FW_PORT_DCB_CFG_ERROR = 0x1 }; enum fw_port_dcb_type { FW_PORT_DCB_TYPE_PGID = 0x00, FW_PORT_DCB_TYPE_PGRATE = 0x01, FW_PORT_DCB_TYPE_PRIORATE = 0x02, FW_PORT_DCB_TYPE_PFC = 0x03, FW_PORT_DCB_TYPE_APP_ID = 0x04, FW_PORT_DCB_TYPE_CONTROL = 0x05, }; enum fw_port_dcb_feature_state { FW_PORT_DCB_FEATURE_STATE_PENDING = 0x0, FW_PORT_DCB_FEATURE_STATE_SUCCESS = 0x1, FW_PORT_DCB_FEATURE_STATE_ERROR = 0x2, FW_PORT_DCB_FEATURE_STATE_TIMEOUT = 0x3, }; enum fw_port_diag_ops { FW_PORT_DIAGS_TEMP = 0x00, FW_PORT_DIAGS_TX_POWER = 0x01, FW_PORT_DIAGS_RX_POWER = 0x02, FW_PORT_DIAGS_TX_DIS = 0x03, }; struct fw_port_cmd { __be32 op_to_portid; __be32 action_to_len16; union fw_port { struct fw_port_l1cfg { __be32 rcap; __be32 r; } l1cfg; 
struct fw_port_l2cfg { __u8 ctlbf; __u8 ovlan3_to_ivlan0; __be16 ivlantype; __be16 txipg_force_pinfo; __be16 mtu; __be16 ovlan0mask; __be16 ovlan0type; __be16 ovlan1mask; __be16 ovlan1type; __be16 ovlan2mask; __be16 ovlan2type; __be16 ovlan3mask; __be16 ovlan3type; } l2cfg; struct fw_port_info { __be32 lstatus_to_modtype; __be16 pcap; __be16 acap; __be16 mtu; __u8 cbllen; __u8 auxlinfo; __u8 dcbxdis_pkd; __u8 r8_lo; __be16 lpacap; __be64 r9; } info; struct fw_port_diags { __u8 diagop; __u8 r[3]; __be32 diagval; } diags; union fw_port_dcb { struct fw_port_dcb_pgid { __u8 type; __u8 apply_pkd; __u8 r10_lo[2]; __be32 pgid; __be64 r11; } pgid; struct fw_port_dcb_pgrate { __u8 type; __u8 apply_pkd; __u8 r10_lo[5]; __u8 num_tcs_supported; __u8 pgrate[8]; __u8 tsa[8]; } pgrate; struct fw_port_dcb_priorate { __u8 type; __u8 apply_pkd; __u8 r10_lo[6]; __u8 strict_priorate[8]; } priorate; struct fw_port_dcb_pfc { __u8 type; __u8 pfcen; __u8 apply_pkd; __u8 r10_lo[4]; __u8 max_pfc_tcs; __be64 r11; } pfc; struct fw_port_app_priority { __u8 type; __u8 apply_pkd; __u8 r10_lo; __u8 idx; __u8 user_prio_map; __u8 sel_field; __be16 protocolid; __be64 r12; } app_priority; struct fw_port_dcb_control { __u8 type; __u8 all_syncd_pkd; __be16 dcb_version_to_app_state; __be32 r11; __be64 r12; } control; } dcb; struct fw_port_l1cfg32 { __be32 rcap32; __be32 r; } l1cfg32; struct fw_port_info32 { __be32 lstatus32_to_cbllen32; __be32 auxlinfo32_mtu32; __be32 linkattr32; __be32 pcaps32; __be32 acaps32; __be32 lpacaps32; } info32; } u; }; #define S_FW_PORT_CMD_READ 22 #define M_FW_PORT_CMD_READ 0x1 #define V_FW_PORT_CMD_READ(x) ((x) << S_FW_PORT_CMD_READ) #define G_FW_PORT_CMD_READ(x) \ (((x) >> S_FW_PORT_CMD_READ) & M_FW_PORT_CMD_READ) #define F_FW_PORT_CMD_READ V_FW_PORT_CMD_READ(1U) #define S_FW_PORT_CMD_PORTID 0 #define M_FW_PORT_CMD_PORTID 0xf #define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID) #define G_FW_PORT_CMD_PORTID(x) \ (((x) >> S_FW_PORT_CMD_PORTID) & 
M_FW_PORT_CMD_PORTID) #define S_FW_PORT_CMD_ACTION 16 #define M_FW_PORT_CMD_ACTION 0xffff #define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION) #define G_FW_PORT_CMD_ACTION(x) \ (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION) #define S_FW_PORT_CMD_OVLAN3 7 #define M_FW_PORT_CMD_OVLAN3 0x1 #define V_FW_PORT_CMD_OVLAN3(x) ((x) << S_FW_PORT_CMD_OVLAN3) #define G_FW_PORT_CMD_OVLAN3(x) \ (((x) >> S_FW_PORT_CMD_OVLAN3) & M_FW_PORT_CMD_OVLAN3) #define F_FW_PORT_CMD_OVLAN3 V_FW_PORT_CMD_OVLAN3(1U) #define S_FW_PORT_CMD_OVLAN2 6 #define M_FW_PORT_CMD_OVLAN2 0x1 #define V_FW_PORT_CMD_OVLAN2(x) ((x) << S_FW_PORT_CMD_OVLAN2) #define G_FW_PORT_CMD_OVLAN2(x) \ (((x) >> S_FW_PORT_CMD_OVLAN2) & M_FW_PORT_CMD_OVLAN2) #define F_FW_PORT_CMD_OVLAN2 V_FW_PORT_CMD_OVLAN2(1U) #define S_FW_PORT_CMD_OVLAN1 5 #define M_FW_PORT_CMD_OVLAN1 0x1 #define V_FW_PORT_CMD_OVLAN1(x) ((x) << S_FW_PORT_CMD_OVLAN1) #define G_FW_PORT_CMD_OVLAN1(x) \ (((x) >> S_FW_PORT_CMD_OVLAN1) & M_FW_PORT_CMD_OVLAN1) #define F_FW_PORT_CMD_OVLAN1 V_FW_PORT_CMD_OVLAN1(1U) #define S_FW_PORT_CMD_OVLAN0 4 #define M_FW_PORT_CMD_OVLAN0 0x1 #define V_FW_PORT_CMD_OVLAN0(x) ((x) << S_FW_PORT_CMD_OVLAN0) #define G_FW_PORT_CMD_OVLAN0(x) \ (((x) >> S_FW_PORT_CMD_OVLAN0) & M_FW_PORT_CMD_OVLAN0) #define F_FW_PORT_CMD_OVLAN0 V_FW_PORT_CMD_OVLAN0(1U) #define S_FW_PORT_CMD_IVLAN0 3 #define M_FW_PORT_CMD_IVLAN0 0x1 #define V_FW_PORT_CMD_IVLAN0(x) ((x) << S_FW_PORT_CMD_IVLAN0) #define G_FW_PORT_CMD_IVLAN0(x) \ (((x) >> S_FW_PORT_CMD_IVLAN0) & M_FW_PORT_CMD_IVLAN0) #define F_FW_PORT_CMD_IVLAN0 V_FW_PORT_CMD_IVLAN0(1U) #define S_FW_PORT_CMD_OVLAN_FILT 2 #define M_FW_PORT_CMD_OVLAN_FILT 0x1 #define V_FW_PORT_CMD_OVLAN_FILT(x) ((x) << S_FW_PORT_CMD_OVLAN_FILT) #define G_FW_PORT_CMD_OVLAN_FILT(x) \ (((x) >> S_FW_PORT_CMD_OVLAN_FILT) & M_FW_PORT_CMD_OVLAN_FILT) #define F_FW_PORT_CMD_OVLAN_FILT V_FW_PORT_CMD_OVLAN_FILT(1U) #define S_FW_PORT_CMD_TXIPG 3 #define M_FW_PORT_CMD_TXIPG 0x1fff #define V_FW_PORT_CMD_TXIPG(x) ((x) << 
S_FW_PORT_CMD_TXIPG) #define G_FW_PORT_CMD_TXIPG(x) \ (((x) >> S_FW_PORT_CMD_TXIPG) & M_FW_PORT_CMD_TXIPG) #define S_FW_PORT_CMD_FORCE_PINFO 0 #define M_FW_PORT_CMD_FORCE_PINFO 0x1 #define V_FW_PORT_CMD_FORCE_PINFO(x) ((x) << S_FW_PORT_CMD_FORCE_PINFO) #define G_FW_PORT_CMD_FORCE_PINFO(x) \ (((x) >> S_FW_PORT_CMD_FORCE_PINFO) & M_FW_PORT_CMD_FORCE_PINFO) #define F_FW_PORT_CMD_FORCE_PINFO V_FW_PORT_CMD_FORCE_PINFO(1U) #define S_FW_PORT_CMD_LSTATUS 31 #define M_FW_PORT_CMD_LSTATUS 0x1 #define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS) #define G_FW_PORT_CMD_LSTATUS(x) \ (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS) #define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U) #define S_FW_PORT_CMD_LSPEED 24 #define M_FW_PORT_CMD_LSPEED 0x3f #define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED) #define G_FW_PORT_CMD_LSPEED(x) \ (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED) #define S_FW_PORT_CMD_TXPAUSE 23 #define M_FW_PORT_CMD_TXPAUSE 0x1 #define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE) #define G_FW_PORT_CMD_TXPAUSE(x) \ (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE) #define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U) #define S_FW_PORT_CMD_RXPAUSE 22 #define M_FW_PORT_CMD_RXPAUSE 0x1 #define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE) #define G_FW_PORT_CMD_RXPAUSE(x) \ (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE) #define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U) #define S_FW_PORT_CMD_MDIOCAP 21 #define M_FW_PORT_CMD_MDIOCAP 0x1 #define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP) #define G_FW_PORT_CMD_MDIOCAP(x) \ (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP) #define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U) #define S_FW_PORT_CMD_MDIOADDR 16 #define M_FW_PORT_CMD_MDIOADDR 0x1f #define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR) #define G_FW_PORT_CMD_MDIOADDR(x) \ (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR) #define 
S_FW_PORT_CMD_LPTXPAUSE 15 #define M_FW_PORT_CMD_LPTXPAUSE 0x1 #define V_FW_PORT_CMD_LPTXPAUSE(x) ((x) << S_FW_PORT_CMD_LPTXPAUSE) #define G_FW_PORT_CMD_LPTXPAUSE(x) \ (((x) >> S_FW_PORT_CMD_LPTXPAUSE) & M_FW_PORT_CMD_LPTXPAUSE) #define F_FW_PORT_CMD_LPTXPAUSE V_FW_PORT_CMD_LPTXPAUSE(1U) #define S_FW_PORT_CMD_LPRXPAUSE 14 #define M_FW_PORT_CMD_LPRXPAUSE 0x1 #define V_FW_PORT_CMD_LPRXPAUSE(x) ((x) << S_FW_PORT_CMD_LPRXPAUSE) #define G_FW_PORT_CMD_LPRXPAUSE(x) \ (((x) >> S_FW_PORT_CMD_LPRXPAUSE) & M_FW_PORT_CMD_LPRXPAUSE) #define F_FW_PORT_CMD_LPRXPAUSE V_FW_PORT_CMD_LPRXPAUSE(1U) #define S_FW_PORT_CMD_PTYPE 8 #define M_FW_PORT_CMD_PTYPE 0x1f #define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE) #define G_FW_PORT_CMD_PTYPE(x) \ (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE) #define S_FW_PORT_CMD_LINKDNRC 5 #define M_FW_PORT_CMD_LINKDNRC 0x7 #define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC) #define G_FW_PORT_CMD_LINKDNRC(x) \ (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC) #define S_FW_PORT_CMD_MODTYPE 0 #define M_FW_PORT_CMD_MODTYPE 0x1f #define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE) #define G_FW_PORT_CMD_MODTYPE(x) \ (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE) #define S_FW_PORT_AUXLINFO_KX4 2 #define M_FW_PORT_AUXLINFO_KX4 0x1 #define V_FW_PORT_AUXLINFO_KX4(x) \ ((x) << S_FW_PORT_AUXLINFO_KX4) #define G_FW_PORT_AUXLINFO_KX4(x) \ (((x) >> S_FW_PORT_AUXLINFO_KX4) & M_FW_PORT_AUXLINFO_KX4) #define F_FW_PORT_AUXLINFO_KX4 V_FW_PORT_AUXLINFO_KX4(1U) #define S_FW_PORT_AUXLINFO_KR 1 #define M_FW_PORT_AUXLINFO_KR 0x1 #define V_FW_PORT_AUXLINFO_KR(x) \ ((x) << S_FW_PORT_AUXLINFO_KR) #define G_FW_PORT_AUXLINFO_KR(x) \ (((x) >> S_FW_PORT_AUXLINFO_KR) & M_FW_PORT_AUXLINFO_KR) #define F_FW_PORT_AUXLINFO_KR V_FW_PORT_AUXLINFO_KR(1U) #define S_FW_PORT_CMD_DCBXDIS 7 #define M_FW_PORT_CMD_DCBXDIS 0x1 #define V_FW_PORT_CMD_DCBXDIS(x) ((x) << S_FW_PORT_CMD_DCBXDIS) #define G_FW_PORT_CMD_DCBXDIS(x) \ (((x) >> 
S_FW_PORT_CMD_DCBXDIS) & M_FW_PORT_CMD_DCBXDIS) #define F_FW_PORT_CMD_DCBXDIS V_FW_PORT_CMD_DCBXDIS(1U) #define S_FW_PORT_CMD_APPLY 7 #define M_FW_PORT_CMD_APPLY 0x1 #define V_FW_PORT_CMD_APPLY(x) ((x) << S_FW_PORT_CMD_APPLY) #define G_FW_PORT_CMD_APPLY(x) \ (((x) >> S_FW_PORT_CMD_APPLY) & M_FW_PORT_CMD_APPLY) #define F_FW_PORT_CMD_APPLY V_FW_PORT_CMD_APPLY(1U) #define S_FW_PORT_CMD_ALL_SYNCD 7 #define M_FW_PORT_CMD_ALL_SYNCD 0x1 #define V_FW_PORT_CMD_ALL_SYNCD(x) ((x) << S_FW_PORT_CMD_ALL_SYNCD) #define G_FW_PORT_CMD_ALL_SYNCD(x) \ (((x) >> S_FW_PORT_CMD_ALL_SYNCD) & M_FW_PORT_CMD_ALL_SYNCD) #define F_FW_PORT_CMD_ALL_SYNCD V_FW_PORT_CMD_ALL_SYNCD(1U) #define S_FW_PORT_CMD_DCB_VERSION 12 #define M_FW_PORT_CMD_DCB_VERSION 0x7 #define V_FW_PORT_CMD_DCB_VERSION(x) ((x) << S_FW_PORT_CMD_DCB_VERSION) #define G_FW_PORT_CMD_DCB_VERSION(x) \ (((x) >> S_FW_PORT_CMD_DCB_VERSION) & M_FW_PORT_CMD_DCB_VERSION) #define S_FW_PORT_CMD_PFC_STATE 8 #define M_FW_PORT_CMD_PFC_STATE 0xf #define V_FW_PORT_CMD_PFC_STATE(x) ((x) << S_FW_PORT_CMD_PFC_STATE) #define G_FW_PORT_CMD_PFC_STATE(x) \ (((x) >> S_FW_PORT_CMD_PFC_STATE) & M_FW_PORT_CMD_PFC_STATE) #define S_FW_PORT_CMD_ETS_STATE 4 #define M_FW_PORT_CMD_ETS_STATE 0xf #define V_FW_PORT_CMD_ETS_STATE(x) ((x) << S_FW_PORT_CMD_ETS_STATE) #define G_FW_PORT_CMD_ETS_STATE(x) \ (((x) >> S_FW_PORT_CMD_ETS_STATE) & M_FW_PORT_CMD_ETS_STATE) #define S_FW_PORT_CMD_APP_STATE 0 #define M_FW_PORT_CMD_APP_STATE 0xf #define V_FW_PORT_CMD_APP_STATE(x) ((x) << S_FW_PORT_CMD_APP_STATE) #define G_FW_PORT_CMD_APP_STATE(x) \ (((x) >> S_FW_PORT_CMD_APP_STATE) & M_FW_PORT_CMD_APP_STATE) #define S_FW_PORT_CMD_LSTATUS32 31 #define M_FW_PORT_CMD_LSTATUS32 0x1 #define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32) #define G_FW_PORT_CMD_LSTATUS32(x) \ (((x) >> S_FW_PORT_CMD_LSTATUS32) & M_FW_PORT_CMD_LSTATUS32) #define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U) #define S_FW_PORT_CMD_LINKDNRC32 28 #define M_FW_PORT_CMD_LINKDNRC32 0x7 #define 
V_FW_PORT_CMD_LINKDNRC32(x) ((x) << S_FW_PORT_CMD_LINKDNRC32) #define G_FW_PORT_CMD_LINKDNRC32(x) \ (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32) #define S_FW_PORT_CMD_DCBXDIS32 27 #define M_FW_PORT_CMD_DCBXDIS32 0x1 #define V_FW_PORT_CMD_DCBXDIS32(x) ((x) << S_FW_PORT_CMD_DCBXDIS32) #define G_FW_PORT_CMD_DCBXDIS32(x) \ (((x) >> S_FW_PORT_CMD_DCBXDIS32) & M_FW_PORT_CMD_DCBXDIS32) #define F_FW_PORT_CMD_DCBXDIS32 V_FW_PORT_CMD_DCBXDIS32(1U) #define S_FW_PORT_CMD_MDIOCAP32 26 #define M_FW_PORT_CMD_MDIOCAP32 0x1 #define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32) #define G_FW_PORT_CMD_MDIOCAP32(x) \ (((x) >> S_FW_PORT_CMD_MDIOCAP32) & M_FW_PORT_CMD_MDIOCAP32) #define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U) #define S_FW_PORT_CMD_MDIOADDR32 21 #define M_FW_PORT_CMD_MDIOADDR32 0x1f #define V_FW_PORT_CMD_MDIOADDR32(x) ((x) << S_FW_PORT_CMD_MDIOADDR32) #define G_FW_PORT_CMD_MDIOADDR32(x) \ (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32) #define S_FW_PORT_CMD_PORTTYPE32 13 #define M_FW_PORT_CMD_PORTTYPE32 0xff #define V_FW_PORT_CMD_PORTTYPE32(x) ((x) << S_FW_PORT_CMD_PORTTYPE32) #define G_FW_PORT_CMD_PORTTYPE32(x) \ (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32) #define S_FW_PORT_CMD_MODTYPE32 8 #define M_FW_PORT_CMD_MODTYPE32 0x1f #define V_FW_PORT_CMD_MODTYPE32(x) ((x) << S_FW_PORT_CMD_MODTYPE32) #define G_FW_PORT_CMD_MODTYPE32(x) \ (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32) #define S_FW_PORT_CMD_CBLLEN32 0 #define M_FW_PORT_CMD_CBLLEN32 0xff #define V_FW_PORT_CMD_CBLLEN32(x) ((x) << S_FW_PORT_CMD_CBLLEN32) #define G_FW_PORT_CMD_CBLLEN32(x) \ (((x) >> S_FW_PORT_CMD_CBLLEN32) & M_FW_PORT_CMD_CBLLEN32) #define S_FW_PORT_CMD_AUXLINFO32 24 #define M_FW_PORT_CMD_AUXLINFO32 0xff #define V_FW_PORT_CMD_AUXLINFO32(x) ((x) << S_FW_PORT_CMD_AUXLINFO32) #define G_FW_PORT_CMD_AUXLINFO32(x) \ (((x) >> S_FW_PORT_CMD_AUXLINFO32) & M_FW_PORT_CMD_AUXLINFO32) #define S_FW_PORT_AUXLINFO32_KX4 2 
/*
 * AUXLINFO32: auxiliary link-info flags carried in the 32-bit (caps32)
 * variant of the port command.  KX4/KR indicate backplane PHY ability bits.
 */
#define M_FW_PORT_AUXLINFO32_KX4	0x1
#define V_FW_PORT_AUXLINFO32_KX4(x) \
    ((x) << S_FW_PORT_AUXLINFO32_KX4)
#define G_FW_PORT_AUXLINFO32_KX4(x) \
    (((x) >> S_FW_PORT_AUXLINFO32_KX4) & M_FW_PORT_AUXLINFO32_KX4)
#define F_FW_PORT_AUXLINFO32_KX4	V_FW_PORT_AUXLINFO32_KX4(1U)

#define S_FW_PORT_AUXLINFO32_KR		1
#define M_FW_PORT_AUXLINFO32_KR		0x1
#define V_FW_PORT_AUXLINFO32_KR(x) \
    ((x) << S_FW_PORT_AUXLINFO32_KR)
#define G_FW_PORT_AUXLINFO32_KR(x) \
    (((x) >> S_FW_PORT_AUXLINFO32_KR) & M_FW_PORT_AUXLINFO32_KR)
#define F_FW_PORT_AUXLINFO32_KR		V_FW_PORT_AUXLINFO32_KR(1U)

/* MTU field (low 16 bits) of the 32-bit port command word. */
#define S_FW_PORT_CMD_MTU32	0
#define M_FW_PORT_CMD_MTU32	0xffff
#define V_FW_PORT_CMD_MTU32(x)	((x) << S_FW_PORT_CMD_MTU32)
#define G_FW_PORT_CMD_MTU32(x) \
    (((x) >> S_FW_PORT_CMD_MTU32) & M_FW_PORT_CMD_MTU32)

/*
 * These are configured into the VPD and hence tools that generate
 * VPD may use this enumeration.
 *
 * Columns per entry: extPHY  #lanes  T4_I2C  extI2C  BP_Eq  BP_ANEG  Speed
 *
 * REMEMBER:
 *	Update the Common Code t4_hw.c:t4_get_port_type_description()
 *	with any new Firmware Port Technology Types!
 */
/* Physical port technology type, as reported by the firmware. */
enum fw_port_type {
	FW_PORT_TYPE_FIBER_XFI	=  0,	/* Y, 1, N, Y, N, N, 10G */
	FW_PORT_TYPE_FIBER_XAUI	=  1,	/* Y, 4, N, Y, N, N, 10G */
	FW_PORT_TYPE_BT_SGMII	=  2,	/* Y, 1, No, No, No, No, 1G/100M */
	FW_PORT_TYPE_BT_XFI	=  3,	/* Y, 1, No, No, No, No, 10G/1G/100M */
	FW_PORT_TYPE_BT_XAUI	=  4,	/* Y, 4, No, No, No, No, 10G/1G/100M */
	FW_PORT_TYPE_KX4	=  5,	/* No, 4, No, No, Yes, Yes, 10G */
	FW_PORT_TYPE_CX4	=  6,	/* No, 4, No, No, No, No, 10G */
	FW_PORT_TYPE_KX		=  7,	/* No, 1, No, No, Yes, No, 1G */
	FW_PORT_TYPE_KR		=  8,	/* No, 1, No, No, Yes, Yes, 10G */
	FW_PORT_TYPE_SFP	=  9,	/* No, 1, Yes, No, No, No, 10G */
	FW_PORT_TYPE_BP_AP	= 10,	/* No, 1, No, No, Yes, Yes, 10G, BP ANEG */
	FW_PORT_TYPE_BP4_AP	= 11,	/* No, 4, No, No, Yes, Yes, 10G, BP ANEG */
	FW_PORT_TYPE_QSFP_10G	= 12,	/* No, 1, Yes, No, No, No, 10G */
	FW_PORT_TYPE_QSA	= 13,	/* No, 1, Yes, No, No, No, 10G */
	FW_PORT_TYPE_QSFP	= 14,	/* No, 4, Yes, No, No, No, 40G */
	FW_PORT_TYPE_BP40_BA	= 15,	/* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANEG */
	FW_PORT_TYPE_KR4_100G	= 16,	/* No, 4, 100G/40G/25G, Backplane */
	FW_PORT_TYPE_CR4_QSFP	= 17,	/* No, 4, 100G/40G/25G */
	FW_PORT_TYPE_CR_QSFP	= 18,	/* No, 1, 25G Spider cable */
	FW_PORT_TYPE_CR2_QSFP	= 19,	/* No, 2, 50G */
	FW_PORT_TYPE_SFP28	= 20,	/* No, 1, 25G/10G/1G */
	FW_PORT_TYPE_KR_SFP28	= 21,	/* No, 1, 25G/10G/1G using Backplane */
	FW_PORT_TYPE_KR_XLAUI	= 22,	/* No, 4, 40G/10G/1G, No AN*/

	/* All-ones PTYPE field means "no port type". */
	FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
};

/*
 * These are read from the module's EEPROM and determined once the module is
 * inserted.
 */
/* Transceiver module type, decoded from the module's EEPROM. */
enum fw_port_module_type {
	FW_PORT_MOD_TYPE_NA		= 0x0,
	FW_PORT_MOD_TYPE_LR		= 0x1,
	FW_PORT_MOD_TYPE_SR		= 0x2,
	FW_PORT_MOD_TYPE_ER		= 0x3,
	FW_PORT_MOD_TYPE_TWINAX_PASSIVE	= 0x4,
	FW_PORT_MOD_TYPE_TWINAX_ACTIVE	= 0x5,
	FW_PORT_MOD_TYPE_LRM		= 0x6,

	/* Special values at the top of the MODTYPE field's range. */
	FW_PORT_MOD_TYPE_ERROR		= M_FW_PORT_CMD_MODTYPE - 3,
	FW_PORT_MOD_TYPE_UNKNOWN	= M_FW_PORT_CMD_MODTYPE - 2,
	FW_PORT_MOD_TYPE_NOTSUPPORTED	= M_FW_PORT_CMD_MODTYPE - 1,
	FW_PORT_MOD_TYPE_NONE		= M_FW_PORT_CMD_MODTYPE
};

/* used by FW and tools may use this to generate VPD */
enum fw_port_mod_sub_type {
	FW_PORT_MOD_SUB_TYPE_NA,
	FW_PORT_MOD_SUB_TYPE_MV88E114X	= 0x1,
	FW_PORT_MOD_SUB_TYPE_TN8022	= 0x2,
	FW_PORT_MOD_SUB_TYPE_AQ1202	= 0x3,
	FW_PORT_MOD_SUB_TYPE_88x3120	= 0x4,
	FW_PORT_MOD_SUB_TYPE_BCM84834	= 0x5,
	FW_PORT_MOD_SUB_TYPE_BCM5482	= 0x6,
	FW_PORT_MOD_SUB_TYPE_BCM84856	= 0x7,
	FW_PORT_MOD_SUB_TYPE_BT_VSC8634	= 0x8,

	/*
	 * The following will never be in the VPD.  They are TWINAX cable
	 * lengths decoded from SFP+ module i2c PROMs.  These should almost
	 * certainly go somewhere else ...
	 */
	FW_PORT_MOD_SUB_TYPE_TWINAX_1	= 0x9,
	FW_PORT_MOD_SUB_TYPE_TWINAX_3	= 0xA,
	FW_PORT_MOD_SUB_TYPE_TWINAX_5	= 0xB,
	FW_PORT_MOD_SUB_TYPE_TWINAX_7	= 0xC,
};

/* link down reason codes (3b) */
enum fw_port_link_dn_rc {
	FW_PORT_LINK_DN_RC_NONE,
	FW_PORT_LINK_DN_RC_REMFLT,	/* Remote fault detected */
	FW_PORT_LINK_DN_ANEG_F,		/* Auto-negotiation fault */
	FW_PORT_LINK_DN_RESERVED3,
	FW_PORT_LINK_DN_OVERHEAT,	/* Port overheated */
	FW_PORT_LINK_DN_UNKNOWN,	/* Unable to determine reason */
	FW_PORT_LINK_DN_RX_LOS,		/* No RX signal detected */
	FW_PORT_LINK_DN_RESERVED7
};

/*
 * Indices of the TX port statistics returned by FW_PORT_STATS_CMD.  The
 * order here must match the firmware's layout (see struct
 * fw_port_stats_all below).
 */
enum fw_port_stats_tx_index {
	FW_STAT_TX_PORT_BYTES_IX = 0,
	FW_STAT_TX_PORT_FRAMES_IX,
	FW_STAT_TX_PORT_BCAST_IX,
	FW_STAT_TX_PORT_MCAST_IX,
	FW_STAT_TX_PORT_UCAST_IX,
	FW_STAT_TX_PORT_ERROR_IX,
	FW_STAT_TX_PORT_64B_IX,
	FW_STAT_TX_PORT_65B_127B_IX,
	FW_STAT_TX_PORT_128B_255B_IX,
	FW_STAT_TX_PORT_256B_511B_IX,
	FW_STAT_TX_PORT_512B_1023B_IX,
	FW_STAT_TX_PORT_1024B_1518B_IX,
	FW_STAT_TX_PORT_1519B_MAX_IX,
	FW_STAT_TX_PORT_DROP_IX,
	FW_STAT_TX_PORT_PAUSE_IX,
	FW_STAT_TX_PORT_PPP0_IX,
	FW_STAT_TX_PORT_PPP1_IX,
	FW_STAT_TX_PORT_PPP2_IX,
	FW_STAT_TX_PORT_PPP3_IX,
	FW_STAT_TX_PORT_PPP4_IX,
	FW_STAT_TX_PORT_PPP5_IX,
	FW_STAT_TX_PORT_PPP6_IX,
	FW_STAT_TX_PORT_PPP7_IX,
	FW_NUM_PORT_TX_STATS		/* count of TX port statistics */
};

/* Indices of the RX port statistics returned by FW_PORT_STATS_CMD. */
enum fw_port_stat_rx_index {
	FW_STAT_RX_PORT_BYTES_IX = 0,
	FW_STAT_RX_PORT_FRAMES_IX,
	FW_STAT_RX_PORT_BCAST_IX,
	FW_STAT_RX_PORT_MCAST_IX,
	FW_STAT_RX_PORT_UCAST_IX,
	FW_STAT_RX_PORT_MTU_ERROR_IX,
	FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
	FW_STAT_RX_PORT_CRC_ERROR_IX,
	FW_STAT_RX_PORT_LEN_ERROR_IX,
	FW_STAT_RX_PORT_SYM_ERROR_IX,
	FW_STAT_RX_PORT_64B_IX,
	FW_STAT_RX_PORT_65B_127B_IX,
	FW_STAT_RX_PORT_128B_255B_IX,
	FW_STAT_RX_PORT_256B_511B_IX,
	FW_STAT_RX_PORT_512B_1023B_IX,
	FW_STAT_RX_PORT_1024B_1518B_IX,
	FW_STAT_RX_PORT_1519B_MAX_IX,
	FW_STAT_RX_PORT_PAUSE_IX,
	FW_STAT_RX_PORT_PPP0_IX,
	FW_STAT_RX_PORT_PPP1_IX,
	FW_STAT_RX_PORT_PPP2_IX,
	FW_STAT_RX_PORT_PPP3_IX,
	FW_STAT_RX_PORT_PPP4_IX,
	FW_STAT_RX_PORT_PPP5_IX,
	FW_STAT_RX_PORT_PPP6_IX,
	FW_STAT_RX_PORT_PPP7_IX,
FW_STAT_RX_PORT_LESS_64B_IX, FW_STAT_RX_PORT_MAC_ERROR_IX, FW_NUM_PORT_RX_STATS }; /* port stats */ #define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + \ FW_NUM_PORT_RX_STATS) struct fw_port_stats_cmd { __be32 op_to_portid; __be32 retval_len16; union fw_port_stats { struct fw_port_stats_ctl { __u8 nstats_bg_bm; __u8 tx_ix; __be16 r6; __be32 r7; __be64 stat0; __be64 stat1; __be64 stat2; __be64 stat3; __be64 stat4; __be64 stat5; } ctl; struct fw_port_stats_all { __be64 tx_bytes; __be64 tx_frames; __be64 tx_bcast; __be64 tx_mcast; __be64 tx_ucast; __be64 tx_error; __be64 tx_64b; __be64 tx_65b_127b; __be64 tx_128b_255b; __be64 tx_256b_511b; __be64 tx_512b_1023b; __be64 tx_1024b_1518b; __be64 tx_1519b_max; __be64 tx_drop; __be64 tx_pause; __be64 tx_ppp0; __be64 tx_ppp1; __be64 tx_ppp2; __be64 tx_ppp3; __be64 tx_ppp4; __be64 tx_ppp5; __be64 tx_ppp6; __be64 tx_ppp7; __be64 rx_bytes; __be64 rx_frames; __be64 rx_bcast; __be64 rx_mcast; __be64 rx_ucast; __be64 rx_mtu_error; __be64 rx_mtu_crc_error; __be64 rx_crc_error; __be64 rx_len_error; __be64 rx_sym_error; __be64 rx_64b; __be64 rx_65b_127b; __be64 rx_128b_255b; __be64 rx_256b_511b; __be64 rx_512b_1023b; __be64 rx_1024b_1518b; __be64 rx_1519b_max; __be64 rx_pause; __be64 rx_ppp0; __be64 rx_ppp1; __be64 rx_ppp2; __be64 rx_ppp3; __be64 rx_ppp4; __be64 rx_ppp5; __be64 rx_ppp6; __be64 rx_ppp7; __be64 rx_less_64b; __be64 rx_bg_drop; __be64 rx_bg_trunc; } all; } u; }; #define S_FW_PORT_STATS_CMD_NSTATS 4 #define M_FW_PORT_STATS_CMD_NSTATS 0x7 #define V_FW_PORT_STATS_CMD_NSTATS(x) ((x) << S_FW_PORT_STATS_CMD_NSTATS) #define G_FW_PORT_STATS_CMD_NSTATS(x) \ (((x) >> S_FW_PORT_STATS_CMD_NSTATS) & M_FW_PORT_STATS_CMD_NSTATS) #define S_FW_PORT_STATS_CMD_BG_BM 0 #define M_FW_PORT_STATS_CMD_BG_BM 0x3 #define V_FW_PORT_STATS_CMD_BG_BM(x) ((x) << S_FW_PORT_STATS_CMD_BG_BM) #define G_FW_PORT_STATS_CMD_BG_BM(x) \ (((x) >> S_FW_PORT_STATS_CMD_BG_BM) & M_FW_PORT_STATS_CMD_BG_BM) #define S_FW_PORT_STATS_CMD_TX 7 #define M_FW_PORT_STATS_CMD_TX 
0x1 #define V_FW_PORT_STATS_CMD_TX(x) ((x) << S_FW_PORT_STATS_CMD_TX) #define G_FW_PORT_STATS_CMD_TX(x) \ (((x) >> S_FW_PORT_STATS_CMD_TX) & M_FW_PORT_STATS_CMD_TX) #define F_FW_PORT_STATS_CMD_TX V_FW_PORT_STATS_CMD_TX(1U) #define S_FW_PORT_STATS_CMD_IX 0 #define M_FW_PORT_STATS_CMD_IX 0x3f #define V_FW_PORT_STATS_CMD_IX(x) ((x) << S_FW_PORT_STATS_CMD_IX) #define G_FW_PORT_STATS_CMD_IX(x) \ (((x) >> S_FW_PORT_STATS_CMD_IX) & M_FW_PORT_STATS_CMD_IX) /* port loopback stats */ #define FW_NUM_LB_STATS 14 enum fw_port_lb_stats_index { FW_STAT_LB_PORT_BYTES_IX, FW_STAT_LB_PORT_FRAMES_IX, FW_STAT_LB_PORT_BCAST_IX, FW_STAT_LB_PORT_MCAST_IX, FW_STAT_LB_PORT_UCAST_IX, FW_STAT_LB_PORT_ERROR_IX, FW_STAT_LB_PORT_64B_IX, FW_STAT_LB_PORT_65B_127B_IX, FW_STAT_LB_PORT_128B_255B_IX, FW_STAT_LB_PORT_256B_511B_IX, FW_STAT_LB_PORT_512B_1023B_IX, FW_STAT_LB_PORT_1024B_1518B_IX, FW_STAT_LB_PORT_1519B_MAX_IX, FW_STAT_LB_PORT_DROP_FRAMES_IX }; struct fw_port_lb_stats_cmd { __be32 op_to_lbport; __be32 retval_len16; union fw_port_lb_stats { struct fw_port_lb_stats_ctl { __u8 nstats_bg_bm; __u8 ix_pkd; __be16 r6; __be32 r7; __be64 stat0; __be64 stat1; __be64 stat2; __be64 stat3; __be64 stat4; __be64 stat5; } ctl; struct fw_port_lb_stats_all { __be64 tx_bytes; __be64 tx_frames; __be64 tx_bcast; __be64 tx_mcast; __be64 tx_ucast; __be64 tx_error; __be64 tx_64b; __be64 tx_65b_127b; __be64 tx_128b_255b; __be64 tx_256b_511b; __be64 tx_512b_1023b; __be64 tx_1024b_1518b; __be64 tx_1519b_max; __be64 rx_lb_drop; __be64 rx_lb_trunc; } all; } u; }; #define S_FW_PORT_LB_STATS_CMD_LBPORT 0 #define M_FW_PORT_LB_STATS_CMD_LBPORT 0xf #define V_FW_PORT_LB_STATS_CMD_LBPORT(x) \ ((x) << S_FW_PORT_LB_STATS_CMD_LBPORT) #define G_FW_PORT_LB_STATS_CMD_LBPORT(x) \ (((x) >> S_FW_PORT_LB_STATS_CMD_LBPORT) & M_FW_PORT_LB_STATS_CMD_LBPORT) #define S_FW_PORT_LB_STATS_CMD_NSTATS 4 #define M_FW_PORT_LB_STATS_CMD_NSTATS 0x7 #define V_FW_PORT_LB_STATS_CMD_NSTATS(x) \ ((x) << S_FW_PORT_LB_STATS_CMD_NSTATS) #define 
G_FW_PORT_LB_STATS_CMD_NSTATS(x) \ (((x) >> S_FW_PORT_LB_STATS_CMD_NSTATS) & M_FW_PORT_LB_STATS_CMD_NSTATS) #define S_FW_PORT_LB_STATS_CMD_BG_BM 0 #define M_FW_PORT_LB_STATS_CMD_BG_BM 0x3 #define V_FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << S_FW_PORT_LB_STATS_CMD_BG_BM) #define G_FW_PORT_LB_STATS_CMD_BG_BM(x) \ (((x) >> S_FW_PORT_LB_STATS_CMD_BG_BM) & M_FW_PORT_LB_STATS_CMD_BG_BM) #define S_FW_PORT_LB_STATS_CMD_IX 0 #define M_FW_PORT_LB_STATS_CMD_IX 0xf #define V_FW_PORT_LB_STATS_CMD_IX(x) ((x) << S_FW_PORT_LB_STATS_CMD_IX) #define G_FW_PORT_LB_STATS_CMD_IX(x) \ (((x) >> S_FW_PORT_LB_STATS_CMD_IX) & M_FW_PORT_LB_STATS_CMD_IX) /* Trace related defines */ #define FW_TRACE_CAPTURE_MAX_SINGLE_FLT_MODE 10240 #define FW_TRACE_CAPTURE_MAX_MULTI_FLT_MODE 2560 struct fw_port_trace_cmd { __be32 op_to_portid; __be32 retval_len16; __be16 traceen_to_pciech; __be16 qnum; __be32 r5; }; #define S_FW_PORT_TRACE_CMD_PORTID 0 #define M_FW_PORT_TRACE_CMD_PORTID 0xf #define V_FW_PORT_TRACE_CMD_PORTID(x) ((x) << S_FW_PORT_TRACE_CMD_PORTID) #define G_FW_PORT_TRACE_CMD_PORTID(x) \ (((x) >> S_FW_PORT_TRACE_CMD_PORTID) & M_FW_PORT_TRACE_CMD_PORTID) #define S_FW_PORT_TRACE_CMD_TRACEEN 15 #define M_FW_PORT_TRACE_CMD_TRACEEN 0x1 #define V_FW_PORT_TRACE_CMD_TRACEEN(x) ((x) << S_FW_PORT_TRACE_CMD_TRACEEN) #define G_FW_PORT_TRACE_CMD_TRACEEN(x) \ (((x) >> S_FW_PORT_TRACE_CMD_TRACEEN) & M_FW_PORT_TRACE_CMD_TRACEEN) #define F_FW_PORT_TRACE_CMD_TRACEEN V_FW_PORT_TRACE_CMD_TRACEEN(1U) #define S_FW_PORT_TRACE_CMD_FLTMODE 14 #define M_FW_PORT_TRACE_CMD_FLTMODE 0x1 #define V_FW_PORT_TRACE_CMD_FLTMODE(x) ((x) << S_FW_PORT_TRACE_CMD_FLTMODE) #define G_FW_PORT_TRACE_CMD_FLTMODE(x) \ (((x) >> S_FW_PORT_TRACE_CMD_FLTMODE) & M_FW_PORT_TRACE_CMD_FLTMODE) #define F_FW_PORT_TRACE_CMD_FLTMODE V_FW_PORT_TRACE_CMD_FLTMODE(1U) #define S_FW_PORT_TRACE_CMD_DUPLEN 13 #define M_FW_PORT_TRACE_CMD_DUPLEN 0x1 #define V_FW_PORT_TRACE_CMD_DUPLEN(x) ((x) << S_FW_PORT_TRACE_CMD_DUPLEN) #define G_FW_PORT_TRACE_CMD_DUPLEN(x) \ (((x) 
>> S_FW_PORT_TRACE_CMD_DUPLEN) & M_FW_PORT_TRACE_CMD_DUPLEN) #define F_FW_PORT_TRACE_CMD_DUPLEN V_FW_PORT_TRACE_CMD_DUPLEN(1U) #define S_FW_PORT_TRACE_CMD_RUNTFLTSIZE 8 #define M_FW_PORT_TRACE_CMD_RUNTFLTSIZE 0x1f #define V_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x) \ ((x) << S_FW_PORT_TRACE_CMD_RUNTFLTSIZE) #define G_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x) \ (((x) >> S_FW_PORT_TRACE_CMD_RUNTFLTSIZE) & \ M_FW_PORT_TRACE_CMD_RUNTFLTSIZE) #define S_FW_PORT_TRACE_CMD_PCIECH 6 #define M_FW_PORT_TRACE_CMD_PCIECH 0x3 #define V_FW_PORT_TRACE_CMD_PCIECH(x) ((x) << S_FW_PORT_TRACE_CMD_PCIECH) #define G_FW_PORT_TRACE_CMD_PCIECH(x) \ (((x) >> S_FW_PORT_TRACE_CMD_PCIECH) & M_FW_PORT_TRACE_CMD_PCIECH) struct fw_port_trace_mmap_cmd { __be32 op_to_portid; __be32 retval_len16; __be32 fid_to_skipoffset; __be32 minpktsize_capturemax; __u8 map[224]; }; #define S_FW_PORT_TRACE_MMAP_CMD_PORTID 0 #define M_FW_PORT_TRACE_MMAP_CMD_PORTID 0xf #define V_FW_PORT_TRACE_MMAP_CMD_PORTID(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_PORTID) #define G_FW_PORT_TRACE_MMAP_CMD_PORTID(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_PORTID) & \ M_FW_PORT_TRACE_MMAP_CMD_PORTID) #define S_FW_PORT_TRACE_MMAP_CMD_FID 30 #define M_FW_PORT_TRACE_MMAP_CMD_FID 0x3 #define V_FW_PORT_TRACE_MMAP_CMD_FID(x) ((x) << S_FW_PORT_TRACE_MMAP_CMD_FID) #define G_FW_PORT_TRACE_MMAP_CMD_FID(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_FID) & M_FW_PORT_TRACE_MMAP_CMD_FID) #define S_FW_PORT_TRACE_MMAP_CMD_MMAPEN 29 #define M_FW_PORT_TRACE_MMAP_CMD_MMAPEN 0x1 #define V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_MMAPEN) #define G_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_MMAPEN) & \ M_FW_PORT_TRACE_MMAP_CMD_MMAPEN) #define F_FW_PORT_TRACE_MMAP_CMD_MMAPEN V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(1U) #define S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN 28 #define M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN 0x1 #define V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN) #define G_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x) \ (((x) 
>> S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN) & \ M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN) #define F_FW_PORT_TRACE_MMAP_CMD_DCMAPEN V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(1U) #define S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH 8 #define M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH 0x1f #define V_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH) #define G_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH) & \ M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH) #define S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET 0 #define M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET 0x1f #define V_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET) #define G_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET) & \ M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET) #define S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE 18 #define M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE 0x3fff #define V_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE) #define G_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE) & \ M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE) #define S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX 0 #define M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX 0x3fff #define V_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x) \ ((x) << S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX) #define G_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x) \ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX) & \ M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX) enum fw_ptp_subop { /* none */ FW_PTP_SC_INIT_TIMER = 0x00, FW_PTP_SC_TX_TYPE = 0x01, /* init */ FW_PTP_SC_RXTIME_STAMP = 0x08, FW_PTP_SC_RDRX_TYPE = 0x09, /* ts */ FW_PTP_SC_ADJ_FREQ = 0x10, FW_PTP_SC_ADJ_TIME = 0x11, FW_PTP_SC_ADJ_FTIME = 0x12, FW_PTP_SC_WALL_CLOCK = 0x13, FW_PTP_SC_GET_TIME = 0x14, FW_PTP_SC_SET_TIME = 0x15, }; struct fw_ptp_cmd { __be32 op_to_portid; __be32 retval_len16; union fw_ptp { struct fw_ptp_sc { __u8 sc; __u8 r3[7]; } scmd; struct fw_ptp_init { __u8 sc; __u8 txchan; __be16 absid; __be16 mode; __be16 
ptp_rx_ctrl_pkd; } init; struct fw_ptp_ts { __u8 sc; __u8 sign; __be16 r3; __be32 ppb; __be64 tm; } ts; } u; __be64 r3; }; #define S_FW_PTP_CMD_PORTID 0 #define M_FW_PTP_CMD_PORTID 0xf #define V_FW_PTP_CMD_PORTID(x) ((x) << S_FW_PTP_CMD_PORTID) #define G_FW_PTP_CMD_PORTID(x) \ (((x) >> S_FW_PTP_CMD_PORTID) & M_FW_PTP_CMD_PORTID) #define S_FW_PTP_CMD_PTP_RX_CTRL 15 #define M_FW_PTP_CMD_PTP_RX_CTRL 0x1 #define V_FW_PTP_CMD_PTP_RX_CTRL(x) ((x) << S_FW_PTP_CMD_PTP_RX_CTRL) #define G_FW_PTP_CMD_PTP_RX_CTRL(x) \ (((x) >> S_FW_PTP_CMD_PTP_RX_CTRL) & M_FW_PTP_CMD_PTP_RX_CTRL) #define F_FW_PTP_CMD_PTP_RX_CTRL V_FW_PTP_CMD_PTP_RX_CTRL(1U) struct fw_rss_ind_tbl_cmd { __be32 op_to_viid; __be32 retval_len16; __be16 niqid; __be16 startidx; __be32 r3; __be32 iq0_to_iq2; __be32 iq3_to_iq5; __be32 iq6_to_iq8; __be32 iq9_to_iq11; __be32 iq12_to_iq14; __be32 iq15_to_iq17; __be32 iq18_to_iq20; __be32 iq21_to_iq23; __be32 iq24_to_iq26; __be32 iq27_to_iq29; __be32 iq30_iq31; __be32 r15_lo; }; #define S_FW_RSS_IND_TBL_CMD_VIID 0 #define M_FW_RSS_IND_TBL_CMD_VIID 0xfff #define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID) #define G_FW_RSS_IND_TBL_CMD_VIID(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID) #define S_FW_RSS_IND_TBL_CMD_IQ0 20 #define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0) #define G_FW_RSS_IND_TBL_CMD_IQ0(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0) #define S_FW_RSS_IND_TBL_CMD_IQ1 10 #define M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1) #define G_FW_RSS_IND_TBL_CMD_IQ1(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1) #define S_FW_RSS_IND_TBL_CMD_IQ2 0 #define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2) #define G_FW_RSS_IND_TBL_CMD_IQ2(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2) #define 
S_FW_RSS_IND_TBL_CMD_IQ3 20 #define M_FW_RSS_IND_TBL_CMD_IQ3 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ3(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ3) #define G_FW_RSS_IND_TBL_CMD_IQ3(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ3) & M_FW_RSS_IND_TBL_CMD_IQ3) #define S_FW_RSS_IND_TBL_CMD_IQ4 10 #define M_FW_RSS_IND_TBL_CMD_IQ4 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ4(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ4) #define G_FW_RSS_IND_TBL_CMD_IQ4(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ4) & M_FW_RSS_IND_TBL_CMD_IQ4) #define S_FW_RSS_IND_TBL_CMD_IQ5 0 #define M_FW_RSS_IND_TBL_CMD_IQ5 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ5(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ5) #define G_FW_RSS_IND_TBL_CMD_IQ5(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ5) & M_FW_RSS_IND_TBL_CMD_IQ5) #define S_FW_RSS_IND_TBL_CMD_IQ6 20 #define M_FW_RSS_IND_TBL_CMD_IQ6 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ6(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ6) #define G_FW_RSS_IND_TBL_CMD_IQ6(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ6) & M_FW_RSS_IND_TBL_CMD_IQ6) #define S_FW_RSS_IND_TBL_CMD_IQ7 10 #define M_FW_RSS_IND_TBL_CMD_IQ7 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ7(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ7) #define G_FW_RSS_IND_TBL_CMD_IQ7(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ7) & M_FW_RSS_IND_TBL_CMD_IQ7) #define S_FW_RSS_IND_TBL_CMD_IQ8 0 #define M_FW_RSS_IND_TBL_CMD_IQ8 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ8(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ8) #define G_FW_RSS_IND_TBL_CMD_IQ8(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ8) & M_FW_RSS_IND_TBL_CMD_IQ8) #define S_FW_RSS_IND_TBL_CMD_IQ9 20 #define M_FW_RSS_IND_TBL_CMD_IQ9 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ9(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ9) #define G_FW_RSS_IND_TBL_CMD_IQ9(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ9) & M_FW_RSS_IND_TBL_CMD_IQ9) #define S_FW_RSS_IND_TBL_CMD_IQ10 10 #define M_FW_RSS_IND_TBL_CMD_IQ10 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ10(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ10) #define G_FW_RSS_IND_TBL_CMD_IQ10(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ10) & M_FW_RSS_IND_TBL_CMD_IQ10) #define S_FW_RSS_IND_TBL_CMD_IQ11 
0 #define M_FW_RSS_IND_TBL_CMD_IQ11 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ11(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ11) #define G_FW_RSS_IND_TBL_CMD_IQ11(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ11) & M_FW_RSS_IND_TBL_CMD_IQ11) #define S_FW_RSS_IND_TBL_CMD_IQ12 20 #define M_FW_RSS_IND_TBL_CMD_IQ12 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ12(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ12) #define G_FW_RSS_IND_TBL_CMD_IQ12(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ12) & M_FW_RSS_IND_TBL_CMD_IQ12) #define S_FW_RSS_IND_TBL_CMD_IQ13 10 #define M_FW_RSS_IND_TBL_CMD_IQ13 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ13(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ13) #define G_FW_RSS_IND_TBL_CMD_IQ13(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ13) & M_FW_RSS_IND_TBL_CMD_IQ13) #define S_FW_RSS_IND_TBL_CMD_IQ14 0 #define M_FW_RSS_IND_TBL_CMD_IQ14 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ14(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ14) #define G_FW_RSS_IND_TBL_CMD_IQ14(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ14) & M_FW_RSS_IND_TBL_CMD_IQ14) #define S_FW_RSS_IND_TBL_CMD_IQ15 20 #define M_FW_RSS_IND_TBL_CMD_IQ15 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ15(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ15) #define G_FW_RSS_IND_TBL_CMD_IQ15(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ15) & M_FW_RSS_IND_TBL_CMD_IQ15) #define S_FW_RSS_IND_TBL_CMD_IQ16 10 #define M_FW_RSS_IND_TBL_CMD_IQ16 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ16(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ16) #define G_FW_RSS_IND_TBL_CMD_IQ16(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ16) & M_FW_RSS_IND_TBL_CMD_IQ16) #define S_FW_RSS_IND_TBL_CMD_IQ17 0 #define M_FW_RSS_IND_TBL_CMD_IQ17 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ17(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ17) #define G_FW_RSS_IND_TBL_CMD_IQ17(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ17) & M_FW_RSS_IND_TBL_CMD_IQ17) #define S_FW_RSS_IND_TBL_CMD_IQ18 20 #define M_FW_RSS_IND_TBL_CMD_IQ18 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ18(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ18) #define G_FW_RSS_IND_TBL_CMD_IQ18(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ18) & M_FW_RSS_IND_TBL_CMD_IQ18) #define 
S_FW_RSS_IND_TBL_CMD_IQ19 10 #define M_FW_RSS_IND_TBL_CMD_IQ19 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ19(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ19) #define G_FW_RSS_IND_TBL_CMD_IQ19(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ19) & M_FW_RSS_IND_TBL_CMD_IQ19) #define S_FW_RSS_IND_TBL_CMD_IQ20 0 #define M_FW_RSS_IND_TBL_CMD_IQ20 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ20(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ20) #define G_FW_RSS_IND_TBL_CMD_IQ20(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ20) & M_FW_RSS_IND_TBL_CMD_IQ20) #define S_FW_RSS_IND_TBL_CMD_IQ21 20 #define M_FW_RSS_IND_TBL_CMD_IQ21 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ21(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ21) #define G_FW_RSS_IND_TBL_CMD_IQ21(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ21) & M_FW_RSS_IND_TBL_CMD_IQ21) #define S_FW_RSS_IND_TBL_CMD_IQ22 10 #define M_FW_RSS_IND_TBL_CMD_IQ22 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ22(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ22) #define G_FW_RSS_IND_TBL_CMD_IQ22(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ22) & M_FW_RSS_IND_TBL_CMD_IQ22) #define S_FW_RSS_IND_TBL_CMD_IQ23 0 #define M_FW_RSS_IND_TBL_CMD_IQ23 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ23(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ23) #define G_FW_RSS_IND_TBL_CMD_IQ23(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ23) & M_FW_RSS_IND_TBL_CMD_IQ23) #define S_FW_RSS_IND_TBL_CMD_IQ24 20 #define M_FW_RSS_IND_TBL_CMD_IQ24 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ24(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ24) #define G_FW_RSS_IND_TBL_CMD_IQ24(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ24) & M_FW_RSS_IND_TBL_CMD_IQ24) #define S_FW_RSS_IND_TBL_CMD_IQ25 10 #define M_FW_RSS_IND_TBL_CMD_IQ25 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ25(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ25) #define G_FW_RSS_IND_TBL_CMD_IQ25(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ25) & M_FW_RSS_IND_TBL_CMD_IQ25) #define S_FW_RSS_IND_TBL_CMD_IQ26 0 #define M_FW_RSS_IND_TBL_CMD_IQ26 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ26(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ26) #define G_FW_RSS_IND_TBL_CMD_IQ26(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ26) & 
M_FW_RSS_IND_TBL_CMD_IQ26) #define S_FW_RSS_IND_TBL_CMD_IQ27 20 #define M_FW_RSS_IND_TBL_CMD_IQ27 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ27(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ27) #define G_FW_RSS_IND_TBL_CMD_IQ27(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ27) & M_FW_RSS_IND_TBL_CMD_IQ27) #define S_FW_RSS_IND_TBL_CMD_IQ28 10 #define M_FW_RSS_IND_TBL_CMD_IQ28 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ28(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ28) #define G_FW_RSS_IND_TBL_CMD_IQ28(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ28) & M_FW_RSS_IND_TBL_CMD_IQ28) #define S_FW_RSS_IND_TBL_CMD_IQ29 0 #define M_FW_RSS_IND_TBL_CMD_IQ29 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ29(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ29) #define G_FW_RSS_IND_TBL_CMD_IQ29(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ29) & M_FW_RSS_IND_TBL_CMD_IQ29) #define S_FW_RSS_IND_TBL_CMD_IQ30 20 #define M_FW_RSS_IND_TBL_CMD_IQ30 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ30(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ30) #define G_FW_RSS_IND_TBL_CMD_IQ30(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ30) & M_FW_RSS_IND_TBL_CMD_IQ30) #define S_FW_RSS_IND_TBL_CMD_IQ31 10 #define M_FW_RSS_IND_TBL_CMD_IQ31 0x3ff #define V_FW_RSS_IND_TBL_CMD_IQ31(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ31) #define G_FW_RSS_IND_TBL_CMD_IQ31(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ31) & M_FW_RSS_IND_TBL_CMD_IQ31) struct fw_rss_glb_config_cmd { __be32 op_to_write; __be32 retval_len16; union fw_rss_glb_config { struct fw_rss_glb_config_manual { __be32 mode_pkd; __be32 r3; __be64 r4; __be64 r5; } manual; struct fw_rss_glb_config_basicvirtual { __be32 mode_keymode; __be32 synmapen_to_hashtoeplitz; __be64 r8; __be64 r9; } basicvirtual; } u; }; #define S_FW_RSS_GLB_CONFIG_CMD_MODE 28 #define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf #define V_FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << S_FW_RSS_GLB_CONFIG_CMD_MODE) #define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE) #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 #define 
FW_RSS_GLB_CONFIG_CMD_MODE_MAX 1 #define S_FW_RSS_GLB_CONFIG_CMD_KEYMODE 26 #define M_FW_RSS_GLB_CONFIG_CMD_KEYMODE 0x3 #define V_FW_RSS_GLB_CONFIG_CMD_KEYMODE(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_KEYMODE) #define G_FW_RSS_GLB_CONFIG_CMD_KEYMODE(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_KEYMODE) & \ M_FW_RSS_GLB_CONFIG_CMD_KEYMODE) #define FW_RSS_GLB_CONFIG_CMD_KEYMODE_GLBKEY 0 #define FW_RSS_GLB_CONFIG_CMD_KEYMODE_GLBVF_KEY 1 #define FW_RSS_GLB_CONFIG_CMD_KEYMODE_PFVF_KEY 2 #define FW_RSS_GLB_CONFIG_CMD_KEYMODE_IDXVF_KEY 3 #define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8 #define M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) #define G_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) & \ M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) #define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U) #define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7 #define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) #define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) & \ M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) #define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U) #define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6 #define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) #define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) & \ M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) #define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U) #define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5 #define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) #define 
G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) & \ M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) #define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U) #define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4 #define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) #define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) & \ M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) #define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U) #define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3 #define M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) #define G_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) & \ M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) #define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U) #define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2 #define M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) #define G_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) & \ M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) #define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U) #define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1 #define M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) #define G_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) & \ M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) #define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \ V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U) #define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0 #define M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0x1 #define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \ ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) 
#define G_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) & \ M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) #define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \ V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U) struct fw_rss_vi_config_cmd { __be32 op_to_viid; __be32 retval_len16; union fw_rss_vi_config { struct fw_rss_vi_config_manual { __be64 r3; __be64 r4; __be64 r5; } manual; struct fw_rss_vi_config_basicvirtual { __be32 r6; __be32 defaultq_to_udpen; __be32 secretkeyidx_pkd; __be32 secretkeyxor; __be64 r10; } basicvirtual; } u; }; #define S_FW_RSS_VI_CONFIG_CMD_VIID 0 #define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff #define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID) #define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID) #define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16 #define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff #define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) #define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \ M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) #define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4 #define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) #define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \ M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) #define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \ V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3 #define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) #define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \ M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) #define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \ V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2 #define 
M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) #define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \ M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) #define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \ V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1 #define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) #define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \ M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) #define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \ V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0 #define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1 #define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN) #define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN) #define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U) #define S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX 0 #define M_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX 0xf #define V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(x) \ ((x) << S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX) #define G_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(x) \ (((x) >> S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX) & \ M_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX) enum fw_sched_sc { FW_SCHED_SC_CONFIG = 0, FW_SCHED_SC_PARAMS = 1, }; enum fw_sched_type { FW_SCHED_TYPE_PKTSCHED = 0, FW_SCHED_TYPE_STREAMSCHED = 1, }; enum fw_sched_params_level { FW_SCHED_PARAMS_LEVEL_CL_RL = 0, FW_SCHED_PARAMS_LEVEL_CL_WRR = 1, FW_SCHED_PARAMS_LEVEL_CH_RL = 2, }; enum fw_sched_params_mode { FW_SCHED_PARAMS_MODE_CLASS = 0, FW_SCHED_PARAMS_MODE_FLOW = 1, }; enum fw_sched_params_unit { FW_SCHED_PARAMS_UNIT_BITRATE = 0, FW_SCHED_PARAMS_UNIT_PKTRATE = 1, }; enum fw_sched_params_rate { FW_SCHED_PARAMS_RATE_REL = 0, FW_SCHED_PARAMS_RATE_ABS = 1, }; struct 
fw_sched_cmd { __be32 op_to_write; __be32 retval_len16; union fw_sched { struct fw_sched_config { __u8 sc; __u8 type; __u8 minmaxen; __u8 r3[5]; __u8 nclasses[4]; __be32 r4; } config; struct fw_sched_params { __u8 sc; __u8 type; __u8 level; __u8 mode; __u8 unit; __u8 rate; __u8 ch; __u8 cl; __be32 min; __be32 max; __be16 weight; __be16 pktsize; __be16 burstsize; __be16 r4; } params; } u; }; /* * length of the formatting string */ #define FW_DEVLOG_FMT_LEN 192 /* * maximum number of the formatting string parameters */ #define FW_DEVLOG_FMT_PARAMS_NUM 8 /* * priority levels */ enum fw_devlog_level { FW_DEVLOG_LEVEL_EMERG = 0x0, FW_DEVLOG_LEVEL_CRIT = 0x1, FW_DEVLOG_LEVEL_ERR = 0x2, FW_DEVLOG_LEVEL_NOTICE = 0x3, FW_DEVLOG_LEVEL_INFO = 0x4, FW_DEVLOG_LEVEL_DEBUG = 0x5, FW_DEVLOG_LEVEL_MAX = 0x5, }; /* * facilities that may send a log message */ enum fw_devlog_facility { FW_DEVLOG_FACILITY_CORE = 0x00, FW_DEVLOG_FACILITY_CF = 0x01, FW_DEVLOG_FACILITY_SCHED = 0x02, FW_DEVLOG_FACILITY_TIMER = 0x04, FW_DEVLOG_FACILITY_RES = 0x06, FW_DEVLOG_FACILITY_HW = 0x08, FW_DEVLOG_FACILITY_FLR = 0x10, FW_DEVLOG_FACILITY_DMAQ = 0x12, FW_DEVLOG_FACILITY_PHY = 0x14, FW_DEVLOG_FACILITY_MAC = 0x16, FW_DEVLOG_FACILITY_PORT = 0x18, FW_DEVLOG_FACILITY_VI = 0x1A, FW_DEVLOG_FACILITY_FILTER = 0x1C, FW_DEVLOG_FACILITY_ACL = 0x1E, FW_DEVLOG_FACILITY_TM = 0x20, FW_DEVLOG_FACILITY_QFC = 0x22, FW_DEVLOG_FACILITY_DCB = 0x24, FW_DEVLOG_FACILITY_ETH = 0x26, FW_DEVLOG_FACILITY_OFLD = 0x28, FW_DEVLOG_FACILITY_RI = 0x2A, FW_DEVLOG_FACILITY_ISCSI = 0x2C, FW_DEVLOG_FACILITY_FCOE = 0x2E, FW_DEVLOG_FACILITY_FOISCSI = 0x30, FW_DEVLOG_FACILITY_FOFCOE = 0x32, FW_DEVLOG_FACILITY_CHNET = 0x34, FW_DEVLOG_FACILITY_COISCSI = 0x36, FW_DEVLOG_FACILITY_MAX = 0x38, }; /* * log message format */ struct fw_devlog_e { __be64 timestamp; __be32 seqno; __be16 reserved1; __u8 level; __u8 facility; __u8 fmt[FW_DEVLOG_FMT_LEN]; __be32 params[FW_DEVLOG_FMT_PARAMS_NUM]; __be32 reserved3[4]; }; struct fw_devlog_cmd { __be32 
op_to_write; __be32 retval_len16; __u8 level; __u8 r2[7]; __be32 memtype_devlog_memaddr16_devlog; __be32 memsize_devlog; __be32 r3[2]; }; #define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 28 #define M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 0xf #define V_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x) \ ((x) << S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG) #define G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x) \ (((x) >> S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG) & M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG) #define S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG 0 #define M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG 0xfffffff #define V_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x) \ ((x) << S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) #define G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x) \ (((x) >> S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) & \ M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) enum fw_watchdog_actions { FW_WATCHDOG_ACTION_SHUTDOWN = 0, FW_WATCHDOG_ACTION_FLR = 1, FW_WATCHDOG_ACTION_BYPASS = 2, FW_WATCHDOG_ACTION_TMPCHK = 3, FW_WATCHDOG_ACTION_PAUSEOFF = 4, FW_WATCHDOG_ACTION_MAX = 5, }; #define FW_WATCHDOG_MAX_TIMEOUT_SECS 60 struct fw_watchdog_cmd { __be32 op_to_vfn; __be32 retval_len16; __be32 timeout; __be32 action; }; #define S_FW_WATCHDOG_CMD_PFN 8 #define M_FW_WATCHDOG_CMD_PFN 0x7 #define V_FW_WATCHDOG_CMD_PFN(x) ((x) << S_FW_WATCHDOG_CMD_PFN) #define G_FW_WATCHDOG_CMD_PFN(x) \ (((x) >> S_FW_WATCHDOG_CMD_PFN) & M_FW_WATCHDOG_CMD_PFN) #define S_FW_WATCHDOG_CMD_VFN 0 #define M_FW_WATCHDOG_CMD_VFN 0xff #define V_FW_WATCHDOG_CMD_VFN(x) ((x) << S_FW_WATCHDOG_CMD_VFN) #define G_FW_WATCHDOG_CMD_VFN(x) \ (((x) >> S_FW_WATCHDOG_CMD_VFN) & M_FW_WATCHDOG_CMD_VFN) struct fw_clip_cmd { __be32 op_to_write; __be32 alloc_to_len16; __be64 ip_hi; __be64 ip_lo; __be32 r4[2]; }; #define S_FW_CLIP_CMD_ALLOC 31 #define M_FW_CLIP_CMD_ALLOC 0x1 #define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC) #define G_FW_CLIP_CMD_ALLOC(x) \ (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC) #define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U) #define S_FW_CLIP_CMD_FREE 30 #define M_FW_CLIP_CMD_FREE 0x1 #define V_FW_CLIP_CMD_FREE(x) ((x) 
<< S_FW_CLIP_CMD_FREE) #define G_FW_CLIP_CMD_FREE(x) \ (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE) #define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U) #define S_FW_CLIP_CMD_INDEX 16 #define M_FW_CLIP_CMD_INDEX 0x1fff #define V_FW_CLIP_CMD_INDEX(x) ((x) << S_FW_CLIP_CMD_INDEX) #define G_FW_CLIP_CMD_INDEX(x) \ (((x) >> S_FW_CLIP_CMD_INDEX) & M_FW_CLIP_CMD_INDEX) struct fw_clip2_cmd { __be32 op_to_write; __be32 alloc_to_len16; __be64 ip_hi; __be64 ip_lo; __be64 ipm_hi; __be64 ipm_lo; __be32 r4[2]; }; /****************************************************************************** * F O i S C S I C O M M A N D s **************************************/ #define FW_CHNET_IFACE_ADDR_MAX 3 enum fw_chnet_iface_cmd_subop { FW_CHNET_IFACE_CMD_SUBOP_NOOP = 0, FW_CHNET_IFACE_CMD_SUBOP_LINK_UP, FW_CHNET_IFACE_CMD_SUBOP_LINK_DOWN, FW_CHNET_IFACE_CMD_SUBOP_MTU_SET, FW_CHNET_IFACE_CMD_SUBOP_MTU_GET, FW_CHNET_IFACE_CMD_SUBOP_MAX, }; struct fw_chnet_iface_cmd { __be32 op_to_portid; __be32 retval_len16; __u8 subop; __u8 r2[2]; __u8 flags; __be32 ifid_ifstate; __be16 mtu; __be16 vlanid; __be32 r3; __be16 r4; __u8 mac[6]; }; #define S_FW_CHNET_IFACE_CMD_PORTID 0 #define M_FW_CHNET_IFACE_CMD_PORTID 0xf #define V_FW_CHNET_IFACE_CMD_PORTID(x) ((x) << S_FW_CHNET_IFACE_CMD_PORTID) #define G_FW_CHNET_IFACE_CMD_PORTID(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_PORTID) & M_FW_CHNET_IFACE_CMD_PORTID) #define S_FW_CHNET_IFACE_CMD_RSS_IQID 16 #define M_FW_CHNET_IFACE_CMD_RSS_IQID 0xffff #define V_FW_CHNET_IFACE_CMD_RSS_IQID(x) \ ((x) << S_FW_CHNET_IFACE_CMD_RSS_IQID) #define G_FW_CHNET_IFACE_CMD_RSS_IQID(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_RSS_IQID) & M_FW_CHNET_IFACE_CMD_RSS_IQID) #define S_FW_CHNET_IFACE_CMD_RSS_IQID_F 0 #define M_FW_CHNET_IFACE_CMD_RSS_IQID_F 0x1 #define V_FW_CHNET_IFACE_CMD_RSS_IQID_F(x) \ ((x) << S_FW_CHNET_IFACE_CMD_RSS_IQID_F) #define G_FW_CHNET_IFACE_CMD_RSS_IQID_F(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_RSS_IQID_F) & \ M_FW_CHNET_IFACE_CMD_RSS_IQID_F) #define 
F_FW_CHNET_IFACE_CMD_RSS_IQID_F V_FW_CHNET_IFACE_CMD_RSS_IQID_F(1U) #define S_FW_CHNET_IFACE_CMD_IFID 8 #define M_FW_CHNET_IFACE_CMD_IFID 0xffffff #define V_FW_CHNET_IFACE_CMD_IFID(x) ((x) << S_FW_CHNET_IFACE_CMD_IFID) #define G_FW_CHNET_IFACE_CMD_IFID(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_IFID) & M_FW_CHNET_IFACE_CMD_IFID) #define S_FW_CHNET_IFACE_CMD_IFSTATE 0 #define M_FW_CHNET_IFACE_CMD_IFSTATE 0xff #define V_FW_CHNET_IFACE_CMD_IFSTATE(x) ((x) << S_FW_CHNET_IFACE_CMD_IFSTATE) #define G_FW_CHNET_IFACE_CMD_IFSTATE(x) \ (((x) >> S_FW_CHNET_IFACE_CMD_IFSTATE) & M_FW_CHNET_IFACE_CMD_IFSTATE) struct fw_fcoe_res_info_cmd { __be32 op_to_read; __be32 retval_len16; __be16 e_d_tov; __be16 r_a_tov_seq; __be16 r_a_tov_els; __be16 r_r_tov; __be32 max_xchgs; __be32 max_ssns; __be32 used_xchgs; __be32 used_ssns; __be32 max_fcfs; __be32 max_vnps; __be32 used_fcfs; __be32 used_vnps; }; struct fw_fcoe_link_cmd { __be32 op_to_portid; __be32 retval_len16; __be32 sub_opcode_fcfi; __u8 r3; __u8 lstatus; __be16 flags; __u8 r4; __u8 set_vlan; __be16 vlan_id; __be32 vnpi_pkd; __be16 r6; __u8 phy_mac[6]; __u8 vnport_wwnn[8]; __u8 vnport_wwpn[8]; }; #define S_FW_FCOE_LINK_CMD_PORTID 0 #define M_FW_FCOE_LINK_CMD_PORTID 0xf #define V_FW_FCOE_LINK_CMD_PORTID(x) ((x) << S_FW_FCOE_LINK_CMD_PORTID) #define G_FW_FCOE_LINK_CMD_PORTID(x) \ (((x) >> S_FW_FCOE_LINK_CMD_PORTID) & M_FW_FCOE_LINK_CMD_PORTID) #define S_FW_FCOE_LINK_CMD_SUB_OPCODE 24 #define M_FW_FCOE_LINK_CMD_SUB_OPCODE 0xff #define V_FW_FCOE_LINK_CMD_SUB_OPCODE(x) \ ((x) << S_FW_FCOE_LINK_CMD_SUB_OPCODE) #define G_FW_FCOE_LINK_CMD_SUB_OPCODE(x) \ (((x) >> S_FW_FCOE_LINK_CMD_SUB_OPCODE) & M_FW_FCOE_LINK_CMD_SUB_OPCODE) #define S_FW_FCOE_LINK_CMD_FCFI 0 #define M_FW_FCOE_LINK_CMD_FCFI 0xffffff #define V_FW_FCOE_LINK_CMD_FCFI(x) ((x) << S_FW_FCOE_LINK_CMD_FCFI) #define G_FW_FCOE_LINK_CMD_FCFI(x) \ (((x) >> S_FW_FCOE_LINK_CMD_FCFI) & M_FW_FCOE_LINK_CMD_FCFI) #define S_FW_FCOE_LINK_CMD_VNPI 0 #define M_FW_FCOE_LINK_CMD_VNPI 0xfffff #define 
V_FW_FCOE_LINK_CMD_VNPI(x) ((x) << S_FW_FCOE_LINK_CMD_VNPI) #define G_FW_FCOE_LINK_CMD_VNPI(x) \ (((x) >> S_FW_FCOE_LINK_CMD_VNPI) & M_FW_FCOE_LINK_CMD_VNPI) struct fw_fcoe_vnp_cmd { __be32 op_to_fcfi; __be32 alloc_to_len16; __be32 gen_wwn_to_vnpi; __be32 vf_id; __be16 iqid; __u8 vnport_mac[6]; __u8 vnport_wwnn[8]; __u8 vnport_wwpn[8]; __u8 cmn_srv_parms[16]; __u8 clsp_word_0_1[8]; }; #define S_FW_FCOE_VNP_CMD_FCFI 0 #define M_FW_FCOE_VNP_CMD_FCFI 0xfffff #define V_FW_FCOE_VNP_CMD_FCFI(x) ((x) << S_FW_FCOE_VNP_CMD_FCFI) #define G_FW_FCOE_VNP_CMD_FCFI(x) \ (((x) >> S_FW_FCOE_VNP_CMD_FCFI) & M_FW_FCOE_VNP_CMD_FCFI) #define S_FW_FCOE_VNP_CMD_ALLOC 31 #define M_FW_FCOE_VNP_CMD_ALLOC 0x1 #define V_FW_FCOE_VNP_CMD_ALLOC(x) ((x) << S_FW_FCOE_VNP_CMD_ALLOC) #define G_FW_FCOE_VNP_CMD_ALLOC(x) \ (((x) >> S_FW_FCOE_VNP_CMD_ALLOC) & M_FW_FCOE_VNP_CMD_ALLOC) #define F_FW_FCOE_VNP_CMD_ALLOC V_FW_FCOE_VNP_CMD_ALLOC(1U) #define S_FW_FCOE_VNP_CMD_FREE 30 #define M_FW_FCOE_VNP_CMD_FREE 0x1 #define V_FW_FCOE_VNP_CMD_FREE(x) ((x) << S_FW_FCOE_VNP_CMD_FREE) #define G_FW_FCOE_VNP_CMD_FREE(x) \ (((x) >> S_FW_FCOE_VNP_CMD_FREE) & M_FW_FCOE_VNP_CMD_FREE) #define F_FW_FCOE_VNP_CMD_FREE V_FW_FCOE_VNP_CMD_FREE(1U) #define S_FW_FCOE_VNP_CMD_MODIFY 29 #define M_FW_FCOE_VNP_CMD_MODIFY 0x1 #define V_FW_FCOE_VNP_CMD_MODIFY(x) ((x) << S_FW_FCOE_VNP_CMD_MODIFY) #define G_FW_FCOE_VNP_CMD_MODIFY(x) \ (((x) >> S_FW_FCOE_VNP_CMD_MODIFY) & M_FW_FCOE_VNP_CMD_MODIFY) #define F_FW_FCOE_VNP_CMD_MODIFY V_FW_FCOE_VNP_CMD_MODIFY(1U) #define S_FW_FCOE_VNP_CMD_GEN_WWN 22 #define M_FW_FCOE_VNP_CMD_GEN_WWN 0x1 #define V_FW_FCOE_VNP_CMD_GEN_WWN(x) ((x) << S_FW_FCOE_VNP_CMD_GEN_WWN) #define G_FW_FCOE_VNP_CMD_GEN_WWN(x) \ (((x) >> S_FW_FCOE_VNP_CMD_GEN_WWN) & M_FW_FCOE_VNP_CMD_GEN_WWN) #define F_FW_FCOE_VNP_CMD_GEN_WWN V_FW_FCOE_VNP_CMD_GEN_WWN(1U) #define S_FW_FCOE_VNP_CMD_PERSIST 21 #define M_FW_FCOE_VNP_CMD_PERSIST 0x1 #define V_FW_FCOE_VNP_CMD_PERSIST(x) ((x) << S_FW_FCOE_VNP_CMD_PERSIST) #define 
G_FW_FCOE_VNP_CMD_PERSIST(x) \ (((x) >> S_FW_FCOE_VNP_CMD_PERSIST) & M_FW_FCOE_VNP_CMD_PERSIST) #define F_FW_FCOE_VNP_CMD_PERSIST V_FW_FCOE_VNP_CMD_PERSIST(1U) #define S_FW_FCOE_VNP_CMD_VFID_EN 20 #define M_FW_FCOE_VNP_CMD_VFID_EN 0x1 #define V_FW_FCOE_VNP_CMD_VFID_EN(x) ((x) << S_FW_FCOE_VNP_CMD_VFID_EN) #define G_FW_FCOE_VNP_CMD_VFID_EN(x) \ (((x) >> S_FW_FCOE_VNP_CMD_VFID_EN) & M_FW_FCOE_VNP_CMD_VFID_EN) #define F_FW_FCOE_VNP_CMD_VFID_EN V_FW_FCOE_VNP_CMD_VFID_EN(1U) #define S_FW_FCOE_VNP_CMD_VNPI 0 #define M_FW_FCOE_VNP_CMD_VNPI 0xfffff #define V_FW_FCOE_VNP_CMD_VNPI(x) ((x) << S_FW_FCOE_VNP_CMD_VNPI) #define G_FW_FCOE_VNP_CMD_VNPI(x) \ (((x) >> S_FW_FCOE_VNP_CMD_VNPI) & M_FW_FCOE_VNP_CMD_VNPI) struct fw_fcoe_sparams_cmd { __be32 op_to_portid; __be32 retval_len16; __u8 r3[7]; __u8 cos; __u8 lport_wwnn[8]; __u8 lport_wwpn[8]; __u8 cmn_srv_parms[16]; __u8 cls_srv_parms[16]; }; #define S_FW_FCOE_SPARAMS_CMD_PORTID 0 #define M_FW_FCOE_SPARAMS_CMD_PORTID 0xf #define V_FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << S_FW_FCOE_SPARAMS_CMD_PORTID) #define G_FW_FCOE_SPARAMS_CMD_PORTID(x) \ (((x) >> S_FW_FCOE_SPARAMS_CMD_PORTID) & M_FW_FCOE_SPARAMS_CMD_PORTID) struct fw_fcoe_stats_cmd { __be32 op_to_flowid; __be32 free_to_len16; union fw_fcoe_stats { struct fw_fcoe_stats_ctl { __u8 nstats_port; __u8 port_valid_ix; __be16 r6; __be32 r7; __be64 stat0; __be64 stat1; __be64 stat2; __be64 stat3; __be64 stat4; __be64 stat5; } ctl; struct fw_fcoe_port_stats { __be64 tx_bcast_bytes; __be64 tx_bcast_frames; __be64 tx_mcast_bytes; __be64 tx_mcast_frames; __be64 tx_ucast_bytes; __be64 tx_ucast_frames; __be64 tx_drop_frames; __be64 tx_offload_bytes; __be64 tx_offload_frames; __be64 rx_bcast_bytes; __be64 rx_bcast_frames; __be64 rx_mcast_bytes; __be64 rx_mcast_frames; __be64 rx_ucast_bytes; __be64 rx_ucast_frames; __be64 rx_err_frames; } port_stats; struct fw_fcoe_fcf_stats { __be32 fip_tx_bytes; __be32 fip_tx_fr; __be64 fcf_ka; __be64 mcast_adv_rcvd; __be16 ucast_adv_rcvd; __be16 sol_sent; 
__be16 vlan_req; __be16 vlan_rpl; __be16 clr_vlink; __be16 link_down; __be16 link_up; __be16 logo; __be16 flogi_req; __be16 flogi_rpl; __be16 fdisc_req; __be16 fdisc_rpl; __be16 fka_prd_chg; __be16 fc_map_chg; __be16 vfid_chg; __u8 no_fka_req; __u8 no_vnp; } fcf_stats; struct fw_fcoe_pcb_stats { __be64 tx_bytes; __be64 tx_frames; __be64 rx_bytes; __be64 rx_frames; __be32 vnp_ka; __be32 unsol_els_rcvd; __be64 unsol_cmd_rcvd; __be16 implicit_logo; __be16 flogi_inv_sparm; __be16 fdisc_inv_sparm; __be16 flogi_rjt; __be16 fdisc_rjt; __be16 no_ssn; __be16 mac_flt_fail; __be16 inv_fr_rcvd; } pcb_stats; struct fw_fcoe_scb_stats { __be64 tx_bytes; __be64 tx_frames; __be64 rx_bytes; __be64 rx_frames; __be32 host_abrt_req; __be32 adap_auto_abrt; __be32 adap_abrt_rsp; __be32 host_ios_req; __be16 ssn_offl_ios; __be16 ssn_not_rdy_ios; __u8 rx_data_ddp_err; __u8 ddp_flt_set_err; __be16 rx_data_fr_err; __u8 bad_st_abrt_req; __u8 no_io_abrt_req; __u8 abort_tmo; __u8 abort_tmo_2; __be32 abort_req; __u8 no_ppod_res_tmo; __u8 bp_tmo; __u8 adap_auto_cls; __u8 no_io_cls_req; __be32 host_cls_req; __be64 unsol_cmd_rcvd; __be32 plogi_req_rcvd; __be32 prli_req_rcvd; __be16 logo_req_rcvd; __be16 prlo_req_rcvd; __be16 plogi_rjt_rcvd; __be16 prli_rjt_rcvd; __be32 adisc_req_rcvd; __be32 rscn_rcvd; __be32 rrq_req_rcvd; __be32 unsol_els_rcvd; __u8 adisc_rjt_rcvd; __u8 scr_rjt; __u8 ct_rjt; __u8 inval_bls_rcvd; __be32 ba_rjt_rcvd; } scb_stats; } u; }; #define S_FW_FCOE_STATS_CMD_FLOWID 0 #define M_FW_FCOE_STATS_CMD_FLOWID 0xfffff #define V_FW_FCOE_STATS_CMD_FLOWID(x) ((x) << S_FW_FCOE_STATS_CMD_FLOWID) #define G_FW_FCOE_STATS_CMD_FLOWID(x) \ (((x) >> S_FW_FCOE_STATS_CMD_FLOWID) & M_FW_FCOE_STATS_CMD_FLOWID) #define S_FW_FCOE_STATS_CMD_FREE 30 #define M_FW_FCOE_STATS_CMD_FREE 0x1 #define V_FW_FCOE_STATS_CMD_FREE(x) ((x) << S_FW_FCOE_STATS_CMD_FREE) #define G_FW_FCOE_STATS_CMD_FREE(x) \ (((x) >> S_FW_FCOE_STATS_CMD_FREE) & M_FW_FCOE_STATS_CMD_FREE) #define F_FW_FCOE_STATS_CMD_FREE 
V_FW_FCOE_STATS_CMD_FREE(1U) #define S_FW_FCOE_STATS_CMD_NSTATS 4 #define M_FW_FCOE_STATS_CMD_NSTATS 0x7 #define V_FW_FCOE_STATS_CMD_NSTATS(x) ((x) << S_FW_FCOE_STATS_CMD_NSTATS) #define G_FW_FCOE_STATS_CMD_NSTATS(x) \ (((x) >> S_FW_FCOE_STATS_CMD_NSTATS) & M_FW_FCOE_STATS_CMD_NSTATS) #define S_FW_FCOE_STATS_CMD_PORT 0 #define M_FW_FCOE_STATS_CMD_PORT 0x3 #define V_FW_FCOE_STATS_CMD_PORT(x) ((x) << S_FW_FCOE_STATS_CMD_PORT) #define G_FW_FCOE_STATS_CMD_PORT(x) \ (((x) >> S_FW_FCOE_STATS_CMD_PORT) & M_FW_FCOE_STATS_CMD_PORT) #define S_FW_FCOE_STATS_CMD_PORT_VALID 7 #define M_FW_FCOE_STATS_CMD_PORT_VALID 0x1 #define V_FW_FCOE_STATS_CMD_PORT_VALID(x) \ ((x) << S_FW_FCOE_STATS_CMD_PORT_VALID) #define G_FW_FCOE_STATS_CMD_PORT_VALID(x) \ (((x) >> S_FW_FCOE_STATS_CMD_PORT_VALID) & M_FW_FCOE_STATS_CMD_PORT_VALID) #define F_FW_FCOE_STATS_CMD_PORT_VALID V_FW_FCOE_STATS_CMD_PORT_VALID(1U) #define S_FW_FCOE_STATS_CMD_IX 0 #define M_FW_FCOE_STATS_CMD_IX 0x3f #define V_FW_FCOE_STATS_CMD_IX(x) ((x) << S_FW_FCOE_STATS_CMD_IX) #define G_FW_FCOE_STATS_CMD_IX(x) \ (((x) >> S_FW_FCOE_STATS_CMD_IX) & M_FW_FCOE_STATS_CMD_IX) struct fw_fcoe_fcf_cmd { __be32 op_to_fcfi; __be32 retval_len16; __be16 priority_pkd; __u8 mac[6]; __u8 name_id[8]; __u8 fabric[8]; __be16 vf_id; __be16 max_fcoe_size; __u8 vlan_id; __u8 fc_map[3]; __be32 fka_adv; __be32 r6; __u8 r7_hi; __u8 fpma_to_portid; __u8 spma_mac[6]; __be64 r8; }; #define S_FW_FCOE_FCF_CMD_FCFI 0 #define M_FW_FCOE_FCF_CMD_FCFI 0xfffff #define V_FW_FCOE_FCF_CMD_FCFI(x) ((x) << S_FW_FCOE_FCF_CMD_FCFI) #define G_FW_FCOE_FCF_CMD_FCFI(x) \ (((x) >> S_FW_FCOE_FCF_CMD_FCFI) & M_FW_FCOE_FCF_CMD_FCFI) #define S_FW_FCOE_FCF_CMD_PRIORITY 0 #define M_FW_FCOE_FCF_CMD_PRIORITY 0xff #define V_FW_FCOE_FCF_CMD_PRIORITY(x) ((x) << S_FW_FCOE_FCF_CMD_PRIORITY) #define G_FW_FCOE_FCF_CMD_PRIORITY(x) \ (((x) >> S_FW_FCOE_FCF_CMD_PRIORITY) & M_FW_FCOE_FCF_CMD_PRIORITY) #define S_FW_FCOE_FCF_CMD_FPMA 6 #define M_FW_FCOE_FCF_CMD_FPMA 0x1 #define 
V_FW_FCOE_FCF_CMD_FPMA(x) ((x) << S_FW_FCOE_FCF_CMD_FPMA) #define G_FW_FCOE_FCF_CMD_FPMA(x) \ (((x) >> S_FW_FCOE_FCF_CMD_FPMA) & M_FW_FCOE_FCF_CMD_FPMA) #define F_FW_FCOE_FCF_CMD_FPMA V_FW_FCOE_FCF_CMD_FPMA(1U) #define S_FW_FCOE_FCF_CMD_SPMA 5 #define M_FW_FCOE_FCF_CMD_SPMA 0x1 #define V_FW_FCOE_FCF_CMD_SPMA(x) ((x) << S_FW_FCOE_FCF_CMD_SPMA) #define G_FW_FCOE_FCF_CMD_SPMA(x) \ (((x) >> S_FW_FCOE_FCF_CMD_SPMA) & M_FW_FCOE_FCF_CMD_SPMA) #define F_FW_FCOE_FCF_CMD_SPMA V_FW_FCOE_FCF_CMD_SPMA(1U) #define S_FW_FCOE_FCF_CMD_LOGIN 4 #define M_FW_FCOE_FCF_CMD_LOGIN 0x1 #define V_FW_FCOE_FCF_CMD_LOGIN(x) ((x) << S_FW_FCOE_FCF_CMD_LOGIN) #define G_FW_FCOE_FCF_CMD_LOGIN(x) \ (((x) >> S_FW_FCOE_FCF_CMD_LOGIN) & M_FW_FCOE_FCF_CMD_LOGIN) #define F_FW_FCOE_FCF_CMD_LOGIN V_FW_FCOE_FCF_CMD_LOGIN(1U) #define S_FW_FCOE_FCF_CMD_PORTID 0 #define M_FW_FCOE_FCF_CMD_PORTID 0xf #define V_FW_FCOE_FCF_CMD_PORTID(x) ((x) << S_FW_FCOE_FCF_CMD_PORTID) #define G_FW_FCOE_FCF_CMD_PORTID(x) \ (((x) >> S_FW_FCOE_FCF_CMD_PORTID) & M_FW_FCOE_FCF_CMD_PORTID) /****************************************************************************** * E R R O R a n d D E B U G C O M M A N D s ******************************************************/ enum fw_error_type { FW_ERROR_TYPE_EXCEPTION = 0x0, FW_ERROR_TYPE_HWMODULE = 0x1, FW_ERROR_TYPE_WR = 0x2, FW_ERROR_TYPE_ACL = 0x3, }; enum fw_dcb_ieee_locations { FW_IEEE_LOC_LOCAL, FW_IEEE_LOC_PEER, FW_IEEE_LOC_OPERATIONAL, }; struct fw_dcb_ieee_cmd { __be32 op_to_location; __be32 changed_to_len16; union fw_dcbx_stats { struct fw_dcbx_pfc_stats_ieee { __be32 pfc_mbc_pkd; __be32 pfc_willing_to_pfc_en; } dcbx_pfc_stats; struct fw_dcbx_ets_stats_ieee { __be32 cbs_to_ets_max_tc; __be32 pg_table; __u8 pg_percent[8]; __u8 tsa[8]; } dcbx_ets_stats; struct fw_dcbx_app_stats_ieee { __be32 num_apps_pkd; __be32 r6; __be32 app[4]; } dcbx_app_stats; struct fw_dcbx_control { __be32 multi_peer_invalidated; __u8 version; __u8 r6[3]; } dcbx_control; } u; }; #define S_FW_DCB_IEEE_CMD_PORT 
8 #define M_FW_DCB_IEEE_CMD_PORT 0x7 #define V_FW_DCB_IEEE_CMD_PORT(x) ((x) << S_FW_DCB_IEEE_CMD_PORT) #define G_FW_DCB_IEEE_CMD_PORT(x) \ (((x) >> S_FW_DCB_IEEE_CMD_PORT) & M_FW_DCB_IEEE_CMD_PORT) #define S_FW_DCB_IEEE_CMD_FEATURE 2 #define M_FW_DCB_IEEE_CMD_FEATURE 0x7 #define V_FW_DCB_IEEE_CMD_FEATURE(x) ((x) << S_FW_DCB_IEEE_CMD_FEATURE) #define G_FW_DCB_IEEE_CMD_FEATURE(x) \ (((x) >> S_FW_DCB_IEEE_CMD_FEATURE) & M_FW_DCB_IEEE_CMD_FEATURE) #define S_FW_DCB_IEEE_CMD_LOCATION 0 #define M_FW_DCB_IEEE_CMD_LOCATION 0x3 #define V_FW_DCB_IEEE_CMD_LOCATION(x) ((x) << S_FW_DCB_IEEE_CMD_LOCATION) #define G_FW_DCB_IEEE_CMD_LOCATION(x) \ (((x) >> S_FW_DCB_IEEE_CMD_LOCATION) & M_FW_DCB_IEEE_CMD_LOCATION) #define S_FW_DCB_IEEE_CMD_CHANGED 20 #define M_FW_DCB_IEEE_CMD_CHANGED 0x1 #define V_FW_DCB_IEEE_CMD_CHANGED(x) ((x) << S_FW_DCB_IEEE_CMD_CHANGED) #define G_FW_DCB_IEEE_CMD_CHANGED(x) \ (((x) >> S_FW_DCB_IEEE_CMD_CHANGED) & M_FW_DCB_IEEE_CMD_CHANGED) #define F_FW_DCB_IEEE_CMD_CHANGED V_FW_DCB_IEEE_CMD_CHANGED(1U) #define S_FW_DCB_IEEE_CMD_RECEIVED 19 #define M_FW_DCB_IEEE_CMD_RECEIVED 0x1 #define V_FW_DCB_IEEE_CMD_RECEIVED(x) ((x) << S_FW_DCB_IEEE_CMD_RECEIVED) #define G_FW_DCB_IEEE_CMD_RECEIVED(x) \ (((x) >> S_FW_DCB_IEEE_CMD_RECEIVED) & M_FW_DCB_IEEE_CMD_RECEIVED) #define F_FW_DCB_IEEE_CMD_RECEIVED V_FW_DCB_IEEE_CMD_RECEIVED(1U) #define S_FW_DCB_IEEE_CMD_APPLY 18 #define M_FW_DCB_IEEE_CMD_APPLY 0x1 #define V_FW_DCB_IEEE_CMD_APPLY(x) ((x) << S_FW_DCB_IEEE_CMD_APPLY) #define G_FW_DCB_IEEE_CMD_APPLY(x) \ (((x) >> S_FW_DCB_IEEE_CMD_APPLY) & M_FW_DCB_IEEE_CMD_APPLY) #define F_FW_DCB_IEEE_CMD_APPLY V_FW_DCB_IEEE_CMD_APPLY(1U) #define S_FW_DCB_IEEE_CMD_DISABLED 17 #define M_FW_DCB_IEEE_CMD_DISABLED 0x1 #define V_FW_DCB_IEEE_CMD_DISABLED(x) ((x) << S_FW_DCB_IEEE_CMD_DISABLED) #define G_FW_DCB_IEEE_CMD_DISABLED(x) \ (((x) >> S_FW_DCB_IEEE_CMD_DISABLED) & M_FW_DCB_IEEE_CMD_DISABLED) #define F_FW_DCB_IEEE_CMD_DISABLED V_FW_DCB_IEEE_CMD_DISABLED(1U) #define S_FW_DCB_IEEE_CMD_MORE 16 
/*
 * FW_DCB_IEEE_CMD bit-field accessors.  Generated S_/M_/V_/G_/F_ convention:
 * S_* = shift, M_* = mask (post-shift), V_*(x) packs a value into the field,
 * G_*(x) extracts it, F_* is the single-bit flag form V_*(1U).
 */

/* MORE: single-bit flag at bit 16 (shift defined just above this group). */
#define M_FW_DCB_IEEE_CMD_MORE 0x1
#define V_FW_DCB_IEEE_CMD_MORE(x) ((x) << S_FW_DCB_IEEE_CMD_MORE)
#define G_FW_DCB_IEEE_CMD_MORE(x) \
	(((x) >> S_FW_DCB_IEEE_CMD_MORE) & M_FW_DCB_IEEE_CMD_MORE)
#define F_FW_DCB_IEEE_CMD_MORE V_FW_DCB_IEEE_CMD_MORE(1U)

/*
 * PFC fields; presumably these live in the dcbx_pfc_stats words
 * (pfc_mbc_pkd / pfc_willing_to_pfc_en) of struct fw_dcb_ieee_cmd —
 * NOTE(review): confirm against the firmware interface spec.
 */
#define S_FW_DCB_IEEE_CMD_PFC_MBC 0
#define M_FW_DCB_IEEE_CMD_PFC_MBC 0x1
#define V_FW_DCB_IEEE_CMD_PFC_MBC(x) ((x) << S_FW_DCB_IEEE_CMD_PFC_MBC)
#define G_FW_DCB_IEEE_CMD_PFC_MBC(x) \
	(((x) >> S_FW_DCB_IEEE_CMD_PFC_MBC) & M_FW_DCB_IEEE_CMD_PFC_MBC)
#define F_FW_DCB_IEEE_CMD_PFC_MBC V_FW_DCB_IEEE_CMD_PFC_MBC(1U)

#define S_FW_DCB_IEEE_CMD_PFC_WILLING 16
#define M_FW_DCB_IEEE_CMD_PFC_WILLING 0x1
#define V_FW_DCB_IEEE_CMD_PFC_WILLING(x) \
	((x) << S_FW_DCB_IEEE_CMD_PFC_WILLING)
#define G_FW_DCB_IEEE_CMD_PFC_WILLING(x) \
	(((x) >> S_FW_DCB_IEEE_CMD_PFC_WILLING) & M_FW_DCB_IEEE_CMD_PFC_WILLING)
#define F_FW_DCB_IEEE_CMD_PFC_WILLING V_FW_DCB_IEEE_CMD_PFC_WILLING(1U)

/* 8-bit fields: max traffic class count and per-priority PFC enable map. */
#define S_FW_DCB_IEEE_CMD_PFC_MAX_TC 8
#define M_FW_DCB_IEEE_CMD_PFC_MAX_TC 0xff
#define V_FW_DCB_IEEE_CMD_PFC_MAX_TC(x) ((x) << S_FW_DCB_IEEE_CMD_PFC_MAX_TC)
#define G_FW_DCB_IEEE_CMD_PFC_MAX_TC(x) \
	(((x) >> S_FW_DCB_IEEE_CMD_PFC_MAX_TC) & M_FW_DCB_IEEE_CMD_PFC_MAX_TC)

#define S_FW_DCB_IEEE_CMD_PFC_EN 0
#define M_FW_DCB_IEEE_CMD_PFC_EN 0xff
#define V_FW_DCB_IEEE_CMD_PFC_EN(x) ((x) << S_FW_DCB_IEEE_CMD_PFC_EN)
#define G_FW_DCB_IEEE_CMD_PFC_EN(x) \
	(((x) >> S_FW_DCB_IEEE_CMD_PFC_EN) & M_FW_DCB_IEEE_CMD_PFC_EN)

/*
 * ETS fields; presumably packed in cbs_to_ets_max_tc of dcbx_ets_stats —
 * NOTE(review): confirm against the firmware interface spec.
 */
#define S_FW_DCB_IEEE_CMD_CBS 16
#define M_FW_DCB_IEEE_CMD_CBS 0x1
#define V_FW_DCB_IEEE_CMD_CBS(x) ((x) << S_FW_DCB_IEEE_CMD_CBS)
#define G_FW_DCB_IEEE_CMD_CBS(x) \
	(((x) >> S_FW_DCB_IEEE_CMD_CBS) & M_FW_DCB_IEEE_CMD_CBS)
#define F_FW_DCB_IEEE_CMD_CBS V_FW_DCB_IEEE_CMD_CBS(1U)

#define S_FW_DCB_IEEE_CMD_ETS_WILLING 8
#define M_FW_DCB_IEEE_CMD_ETS_WILLING 0x1
#define V_FW_DCB_IEEE_CMD_ETS_WILLING(x) \
	((x) << S_FW_DCB_IEEE_CMD_ETS_WILLING)
#define G_FW_DCB_IEEE_CMD_ETS_WILLING(x) \
	(((x) >> S_FW_DCB_IEEE_CMD_ETS_WILLING) & M_FW_DCB_IEEE_CMD_ETS_WILLING)
#define F_FW_DCB_IEEE_CMD_ETS_WILLING V_FW_DCB_IEEE_CMD_ETS_WILLING(1U) #define S_FW_DCB_IEEE_CMD_ETS_MAX_TC 0 #define M_FW_DCB_IEEE_CMD_ETS_MAX_TC 0xff #define V_FW_DCB_IEEE_CMD_ETS_MAX_TC(x) ((x) << S_FW_DCB_IEEE_CMD_ETS_MAX_TC) #define G_FW_DCB_IEEE_CMD_ETS_MAX_TC(x) \ (((x) >> S_FW_DCB_IEEE_CMD_ETS_MAX_TC) & M_FW_DCB_IEEE_CMD_ETS_MAX_TC) #define S_FW_DCB_IEEE_CMD_NUM_APPS 0 #define M_FW_DCB_IEEE_CMD_NUM_APPS 0x7 #define V_FW_DCB_IEEE_CMD_NUM_APPS(x) ((x) << S_FW_DCB_IEEE_CMD_NUM_APPS) #define G_FW_DCB_IEEE_CMD_NUM_APPS(x) \ (((x) >> S_FW_DCB_IEEE_CMD_NUM_APPS) & M_FW_DCB_IEEE_CMD_NUM_APPS) #define S_FW_DCB_IEEE_CMD_MULTI_PEER 31 #define M_FW_DCB_IEEE_CMD_MULTI_PEER 0x1 #define V_FW_DCB_IEEE_CMD_MULTI_PEER(x) ((x) << S_FW_DCB_IEEE_CMD_MULTI_PEER) #define G_FW_DCB_IEEE_CMD_MULTI_PEER(x) \ (((x) >> S_FW_DCB_IEEE_CMD_MULTI_PEER) & M_FW_DCB_IEEE_CMD_MULTI_PEER) #define F_FW_DCB_IEEE_CMD_MULTI_PEER V_FW_DCB_IEEE_CMD_MULTI_PEER(1U) #define S_FW_DCB_IEEE_CMD_INVALIDATED 30 #define M_FW_DCB_IEEE_CMD_INVALIDATED 0x1 #define V_FW_DCB_IEEE_CMD_INVALIDATED(x) \ ((x) << S_FW_DCB_IEEE_CMD_INVALIDATED) #define G_FW_DCB_IEEE_CMD_INVALIDATED(x) \ (((x) >> S_FW_DCB_IEEE_CMD_INVALIDATED) & M_FW_DCB_IEEE_CMD_INVALIDATED) #define F_FW_DCB_IEEE_CMD_INVALIDATED V_FW_DCB_IEEE_CMD_INVALIDATED(1U) /* Hand-written */ #define S_FW_DCB_IEEE_CMD_APP_PROTOCOL 16 #define M_FW_DCB_IEEE_CMD_APP_PROTOCOL 0xffff #define V_FW_DCB_IEEE_CMD_APP_PROTOCOL(x) ((x) << S_FW_DCB_IEEE_CMD_APP_PROTOCOL) #define G_FW_DCB_IEEE_CMD_APP_PROTOCOL(x) \ (((x) >> S_FW_DCB_IEEE_CMD_APP_PROTOCOL) & M_FW_DCB_IEEE_CMD_APP_PROTOCOL) #define S_FW_DCB_IEEE_CMD_APP_SELECT 3 #define M_FW_DCB_IEEE_CMD_APP_SELECT 0x7 #define V_FW_DCB_IEEE_CMD_APP_SELECT(x) ((x) << S_FW_DCB_IEEE_CMD_APP_SELECT) #define G_FW_DCB_IEEE_CMD_APP_SELECT(x) \ (((x) >> S_FW_DCB_IEEE_CMD_APP_SELECT) & M_FW_DCB_IEEE_CMD_APP_SELECT) #define S_FW_DCB_IEEE_CMD_APP_PRIORITY 0 #define M_FW_DCB_IEEE_CMD_APP_PRIORITY 0x7 #define 
V_FW_DCB_IEEE_CMD_APP_PRIORITY(x) ((x) << S_FW_DCB_IEEE_CMD_APP_PRIORITY) #define G_FW_DCB_IEEE_CMD_APP_PRIORITY(x) \ (((x) >> S_FW_DCB_IEEE_CMD_APP_PRIORITY) & M_FW_DCB_IEEE_CMD_APP_PRIORITY) struct fw_error_cmd { __be32 op_to_type; __be32 len16_pkd; union fw_error { struct fw_error_exception { __be32 info[6]; } exception; struct fw_error_hwmodule { __be32 regaddr; __be32 regval; } hwmodule; struct fw_error_wr { __be16 cidx; __be16 pfn_vfn; __be32 eqid; __u8 wrhdr[16]; } wr; struct fw_error_acl { __be16 cidx; __be16 pfn_vfn; __be32 eqid; __be16 mv_pkd; __u8 val[6]; __be64 r4; } acl; } u; }; #define S_FW_ERROR_CMD_FATAL 4 #define M_FW_ERROR_CMD_FATAL 0x1 #define V_FW_ERROR_CMD_FATAL(x) ((x) << S_FW_ERROR_CMD_FATAL) #define G_FW_ERROR_CMD_FATAL(x) \ (((x) >> S_FW_ERROR_CMD_FATAL) & M_FW_ERROR_CMD_FATAL) #define F_FW_ERROR_CMD_FATAL V_FW_ERROR_CMD_FATAL(1U) #define S_FW_ERROR_CMD_TYPE 0 #define M_FW_ERROR_CMD_TYPE 0xf #define V_FW_ERROR_CMD_TYPE(x) ((x) << S_FW_ERROR_CMD_TYPE) #define G_FW_ERROR_CMD_TYPE(x) \ (((x) >> S_FW_ERROR_CMD_TYPE) & M_FW_ERROR_CMD_TYPE) #define S_FW_ERROR_CMD_PFN 8 #define M_FW_ERROR_CMD_PFN 0x7 #define V_FW_ERROR_CMD_PFN(x) ((x) << S_FW_ERROR_CMD_PFN) #define G_FW_ERROR_CMD_PFN(x) \ (((x) >> S_FW_ERROR_CMD_PFN) & M_FW_ERROR_CMD_PFN) #define S_FW_ERROR_CMD_VFN 0 #define M_FW_ERROR_CMD_VFN 0xff #define V_FW_ERROR_CMD_VFN(x) ((x) << S_FW_ERROR_CMD_VFN) #define G_FW_ERROR_CMD_VFN(x) \ (((x) >> S_FW_ERROR_CMD_VFN) & M_FW_ERROR_CMD_VFN) #define S_FW_ERROR_CMD_PFN 8 #define M_FW_ERROR_CMD_PFN 0x7 #define V_FW_ERROR_CMD_PFN(x) ((x) << S_FW_ERROR_CMD_PFN) #define G_FW_ERROR_CMD_PFN(x) \ (((x) >> S_FW_ERROR_CMD_PFN) & M_FW_ERROR_CMD_PFN) #define S_FW_ERROR_CMD_VFN 0 #define M_FW_ERROR_CMD_VFN 0xff #define V_FW_ERROR_CMD_VFN(x) ((x) << S_FW_ERROR_CMD_VFN) #define G_FW_ERROR_CMD_VFN(x) \ (((x) >> S_FW_ERROR_CMD_VFN) & M_FW_ERROR_CMD_VFN) #define S_FW_ERROR_CMD_MV 15 #define M_FW_ERROR_CMD_MV 0x1 #define V_FW_ERROR_CMD_MV(x) ((x) << S_FW_ERROR_CMD_MV) 
#define G_FW_ERROR_CMD_MV(x) \ (((x) >> S_FW_ERROR_CMD_MV) & M_FW_ERROR_CMD_MV) #define F_FW_ERROR_CMD_MV V_FW_ERROR_CMD_MV(1U) struct fw_debug_cmd { __be32 op_type; __be32 len16_pkd; union fw_debug { struct fw_debug_assert { __be32 fcid; __be32 line; __be32 x; __be32 y; __u8 filename_0_7[8]; __u8 filename_8_15[8]; __be64 r3; } assert; struct fw_debug_prt { __be16 dprtstridx; __be16 r3[3]; __be32 dprtstrparam0; __be32 dprtstrparam1; __be32 dprtstrparam2; __be32 dprtstrparam3; } prt; } u; }; #define S_FW_DEBUG_CMD_TYPE 0 #define M_FW_DEBUG_CMD_TYPE 0xff #define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE) #define G_FW_DEBUG_CMD_TYPE(x) \ (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE) enum fw_diag_cmd_type { FW_DIAG_CMD_TYPE_OFLDIAG = 0, FW_DIAG_CMD_TYPE_MEM_TEST_DIAG, }; enum fw_diag_cmd_ofldiag_op { FW_DIAG_CMD_OFLDIAG_TEST_NONE = 0, FW_DIAG_CMD_OFLDIAG_TEST_START, FW_DIAG_CMD_OFLDIAG_TEST_STOP, FW_DIAG_CMD_OFLDIAG_TEST_STATUS, }; enum fw_diag_cmd_ofldiag_status { FW_DIAG_CMD_OFLDIAG_STATUS_IDLE = 0, FW_DIAG_CMD_OFLDIAG_STATUS_RUNNING, FW_DIAG_CMD_OFLDIAG_STATUS_FAILED, FW_DIAG_CMD_OFLDIAG_STATUS_PASSED, }; enum fw_diag_cmd_memdiag_op { FW_DIAG_CMD_MEMDIAG_TEST_START=1, FW_DIAG_CMD_MEMDIAG_TEST_STOP, FW_DIAG_CMD_MEMDIAG_TEST_STATUS, FW_DIAG_CMD_MEMDIAG_TEST_INIT, }; enum fw_diag_cmd_memdiag_status { FW_DIAG_CMD_MEMDIAG_STATUS_NONE, FW_DIAG_CMD_MEMDIAG_STATUS_RUNNING, FW_DIAG_CMD_MEMDIAG_STATUS_FAILED, FW_DIAG_CMD_MEMDIAG_STATUS_PASSED }; struct fw_diag_cmd { __be32 op_type; __be32 len16_pkd; union fw_diag_test { struct fw_diag_test_ofldiag { __u8 test_op; __u8 r3; __be16 test_status; __be32 duration; } ofldiag; struct fw_diag_test_memtest_diag { __u8 test_op; __u8 test_status; __be16 size; /* in KB */ __be32 duration; /* in seconds */ } memdiag; } u; }; #define S_FW_DIAG_CMD_OPCODE 24 #define M_FW_DIAG_CMD_OPCODE 0xff #define V_FW_DIAG_CMD_OPCODE(x) ((x) << S_FW_DIAG_CMD_OPCODE) #define G_FW_DIAG_CMD_OPCODE(x) \ (((x) >> S_FW_DIAG_CMD_OPCODE) & 
M_FW_DIAG_CMD_OPCODE) #define S_FW_DIAG_CMD_TYPE 0 #define M_FW_DIAG_CMD_TYPE 0xff #define V_FW_DIAG_CMD_TYPE(x) ((x) << S_FW_DIAG_CMD_TYPE) #define G_FW_DIAG_CMD_TYPE(x) \ (((x) >> S_FW_DIAG_CMD_TYPE) & M_FW_DIAG_CMD_TYPE) #define S_FW_DIAG_CMD_LEN16 0 #define M_FW_DIAG_CMD_LEN16 0xff #define V_FW_DIAG_CMD_LEN16(x) ((x) << S_FW_DIAG_CMD_LEN16) #define G_FW_DIAG_CMD_LEN16(x) \ (((x) >> S_FW_DIAG_CMD_LEN16) & M_FW_DIAG_CMD_LEN16) struct fw_hma_cmd { __be32 op_pkd; __be32 retval_len16; __be32 mode_to_pcie_params; __be32 naddr_size; __be32 addr_size_pkd; __be32 r6; __be64 phy_address[5]; }; #define S_FW_HMA_CMD_MODE 31 #define M_FW_HMA_CMD_MODE 0x1 #define V_FW_HMA_CMD_MODE(x) ((x) << S_FW_HMA_CMD_MODE) #define G_FW_HMA_CMD_MODE(x) \ (((x) >> S_FW_HMA_CMD_MODE) & M_FW_HMA_CMD_MODE) #define F_FW_HMA_CMD_MODE V_FW_HMA_CMD_MODE(1U) #define S_FW_HMA_CMD_SOC 30 #define M_FW_HMA_CMD_SOC 0x1 #define V_FW_HMA_CMD_SOC(x) ((x) << S_FW_HMA_CMD_SOC) #define G_FW_HMA_CMD_SOC(x) (((x) >> S_FW_HMA_CMD_SOC) & M_FW_HMA_CMD_SOC) #define F_FW_HMA_CMD_SOC V_FW_HMA_CMD_SOC(1U) #define S_FW_HMA_CMD_EOC 29 #define M_FW_HMA_CMD_EOC 0x1 #define V_FW_HMA_CMD_EOC(x) ((x) << S_FW_HMA_CMD_EOC) #define G_FW_HMA_CMD_EOC(x) (((x) >> S_FW_HMA_CMD_EOC) & M_FW_HMA_CMD_EOC) #define F_FW_HMA_CMD_EOC V_FW_HMA_CMD_EOC(1U) #define S_FW_HMA_CMD_PCIE_PARAMS 0 #define M_FW_HMA_CMD_PCIE_PARAMS 0x7ffffff #define V_FW_HMA_CMD_PCIE_PARAMS(x) ((x) << S_FW_HMA_CMD_PCIE_PARAMS) #define G_FW_HMA_CMD_PCIE_PARAMS(x) \ (((x) >> S_FW_HMA_CMD_PCIE_PARAMS) & M_FW_HMA_CMD_PCIE_PARAMS) #define S_FW_HMA_CMD_NADDR 12 #define M_FW_HMA_CMD_NADDR 0x3f #define V_FW_HMA_CMD_NADDR(x) ((x) << S_FW_HMA_CMD_NADDR) #define G_FW_HMA_CMD_NADDR(x) \ (((x) >> S_FW_HMA_CMD_NADDR) & M_FW_HMA_CMD_NADDR) #define S_FW_HMA_CMD_SIZE 0 #define M_FW_HMA_CMD_SIZE 0xfff #define V_FW_HMA_CMD_SIZE(x) ((x) << S_FW_HMA_CMD_SIZE) #define G_FW_HMA_CMD_SIZE(x) \ (((x) >> S_FW_HMA_CMD_SIZE) & M_FW_HMA_CMD_SIZE) #define S_FW_HMA_CMD_ADDR_SIZE 11 #define 
M_FW_HMA_CMD_ADDR_SIZE 0x1fffff #define V_FW_HMA_CMD_ADDR_SIZE(x) ((x) << S_FW_HMA_CMD_ADDR_SIZE) #define G_FW_HMA_CMD_ADDR_SIZE(x) \ (((x) >> S_FW_HMA_CMD_ADDR_SIZE) & M_FW_HMA_CMD_ADDR_SIZE) /****************************************************************************** * P C I E F W R E G I S T E R **************************************/ enum pcie_fw_eval { PCIE_FW_EVAL_CRASH = 0, PCIE_FW_EVAL_PREP = 1, PCIE_FW_EVAL_CONF = 2, PCIE_FW_EVAL_INIT = 3, PCIE_FW_EVAL_UNEXPECTEDEVENT = 4, PCIE_FW_EVAL_OVERHEAT = 5, PCIE_FW_EVAL_DEVICESHUTDOWN = 6, }; /** * Register definitions for the PCIE_FW register which the firmware uses * to retain status across RESETs. This register should be considered * as a READ-ONLY register for Host Software and only to be used to * track firmware initialization/error state, etc. */ #define S_PCIE_FW_ERR 31 #define M_PCIE_FW_ERR 0x1 #define V_PCIE_FW_ERR(x) ((x) << S_PCIE_FW_ERR) #define G_PCIE_FW_ERR(x) (((x) >> S_PCIE_FW_ERR) & M_PCIE_FW_ERR) #define F_PCIE_FW_ERR V_PCIE_FW_ERR(1U) #define S_PCIE_FW_INIT 30 #define M_PCIE_FW_INIT 0x1 #define V_PCIE_FW_INIT(x) ((x) << S_PCIE_FW_INIT) #define G_PCIE_FW_INIT(x) (((x) >> S_PCIE_FW_INIT) & M_PCIE_FW_INIT) #define F_PCIE_FW_INIT V_PCIE_FW_INIT(1U) #define S_PCIE_FW_HALT 29 #define M_PCIE_FW_HALT 0x1 #define V_PCIE_FW_HALT(x) ((x) << S_PCIE_FW_HALT) #define G_PCIE_FW_HALT(x) (((x) >> S_PCIE_FW_HALT) & M_PCIE_FW_HALT) #define F_PCIE_FW_HALT V_PCIE_FW_HALT(1U) #define S_PCIE_FW_EVAL 24 #define M_PCIE_FW_EVAL 0x7 #define V_PCIE_FW_EVAL(x) ((x) << S_PCIE_FW_EVAL) #define G_PCIE_FW_EVAL(x) (((x) >> S_PCIE_FW_EVAL) & M_PCIE_FW_EVAL) #define S_PCIE_FW_STAGE 21 #define M_PCIE_FW_STAGE 0x7 #define V_PCIE_FW_STAGE(x) ((x) << S_PCIE_FW_STAGE) #define G_PCIE_FW_STAGE(x) (((x) >> S_PCIE_FW_STAGE) & M_PCIE_FW_STAGE) #define S_PCIE_FW_ASYNCNOT_VLD 20 #define M_PCIE_FW_ASYNCNOT_VLD 0x1 #define V_PCIE_FW_ASYNCNOT_VLD(x) \ ((x) << S_PCIE_FW_ASYNCNOT_VLD) #define G_PCIE_FW_ASYNCNOT_VLD(x) \ (((x) >> 
S_PCIE_FW_ASYNCNOT_VLD) & M_PCIE_FW_ASYNCNOT_VLD) #define F_PCIE_FW_ASYNCNOT_VLD V_PCIE_FW_ASYNCNOT_VLD(1U) #define S_PCIE_FW_ASYNCNOTINT 19 #define M_PCIE_FW_ASYNCNOTINT 0x1 #define V_PCIE_FW_ASYNCNOTINT(x) \ ((x) << S_PCIE_FW_ASYNCNOTINT) #define G_PCIE_FW_ASYNCNOTINT(x) \ (((x) >> S_PCIE_FW_ASYNCNOTINT) & M_PCIE_FW_ASYNCNOTINT) #define F_PCIE_FW_ASYNCNOTINT V_PCIE_FW_ASYNCNOTINT(1U) #define S_PCIE_FW_ASYNCNOT 16 #define M_PCIE_FW_ASYNCNOT 0x7 #define V_PCIE_FW_ASYNCNOT(x) ((x) << S_PCIE_FW_ASYNCNOT) #define G_PCIE_FW_ASYNCNOT(x) \ (((x) >> S_PCIE_FW_ASYNCNOT) & M_PCIE_FW_ASYNCNOT) #define S_PCIE_FW_MASTER_VLD 15 #define M_PCIE_FW_MASTER_VLD 0x1 #define V_PCIE_FW_MASTER_VLD(x) ((x) << S_PCIE_FW_MASTER_VLD) #define G_PCIE_FW_MASTER_VLD(x) \ (((x) >> S_PCIE_FW_MASTER_VLD) & M_PCIE_FW_MASTER_VLD) #define F_PCIE_FW_MASTER_VLD V_PCIE_FW_MASTER_VLD(1U) #define S_PCIE_FW_MASTER 12 #define M_PCIE_FW_MASTER 0x7 #define V_PCIE_FW_MASTER(x) ((x) << S_PCIE_FW_MASTER) #define G_PCIE_FW_MASTER(x) (((x) >> S_PCIE_FW_MASTER) & M_PCIE_FW_MASTER) #define S_PCIE_FW_RESET_VLD 11 #define M_PCIE_FW_RESET_VLD 0x1 #define V_PCIE_FW_RESET_VLD(x) ((x) << S_PCIE_FW_RESET_VLD) #define G_PCIE_FW_RESET_VLD(x) \ (((x) >> S_PCIE_FW_RESET_VLD) & M_PCIE_FW_RESET_VLD) #define F_PCIE_FW_RESET_VLD V_PCIE_FW_RESET_VLD(1U) #define S_PCIE_FW_RESET 8 #define M_PCIE_FW_RESET 0x7 #define V_PCIE_FW_RESET(x) ((x) << S_PCIE_FW_RESET) #define G_PCIE_FW_RESET(x) \ (((x) >> S_PCIE_FW_RESET) & M_PCIE_FW_RESET) #define S_PCIE_FW_REGISTERED 0 #define M_PCIE_FW_REGISTERED 0xff #define V_PCIE_FW_REGISTERED(x) ((x) << S_PCIE_FW_REGISTERED) #define G_PCIE_FW_REGISTERED(x) \ (((x) >> S_PCIE_FW_REGISTERED) & M_PCIE_FW_REGISTERED) /****************************************************************************** * P C I E F W P F 0 R E G I S T E R **********************************************/ /* * this register is available as 32-bit of persistent storage (across * PL_RST based chip-reset) for boot drivers (i.e. 
firmware and driver * will not write it) */ /****************************************************************************** * P C I E F W P F 7 R E G I S T E R **********************************************/ /* * PF7 stores the Firmware Device Log parameters which allows Host Drivers to * access the "devlog" which needing to contact firmware. The encoding is * mostly the same as that returned by the DEVLOG command except for the size * which is encoded as the number of entries in multiples-1 of 128 here rather * than the memory size as is done in the DEVLOG command. Thus, 0 means 128 * and 15 means 2048. This of course in turn constrains the allowed values * for the devlog size ... */ #define PCIE_FW_PF_DEVLOG 7 #define S_PCIE_FW_PF_DEVLOG_NENTRIES128 28 #define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0xf #define V_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \ ((x) << S_PCIE_FW_PF_DEVLOG_NENTRIES128) #define G_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \ (((x) >> S_PCIE_FW_PF_DEVLOG_NENTRIES128) & \ M_PCIE_FW_PF_DEVLOG_NENTRIES128) #define S_PCIE_FW_PF_DEVLOG_ADDR16 4 #define M_PCIE_FW_PF_DEVLOG_ADDR16 0xffffff #define V_PCIE_FW_PF_DEVLOG_ADDR16(x) ((x) << S_PCIE_FW_PF_DEVLOG_ADDR16) #define G_PCIE_FW_PF_DEVLOG_ADDR16(x) \ (((x) >> S_PCIE_FW_PF_DEVLOG_ADDR16) & M_PCIE_FW_PF_DEVLOG_ADDR16) #define S_PCIE_FW_PF_DEVLOG_MEMTYPE 0 #define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0xf #define V_PCIE_FW_PF_DEVLOG_MEMTYPE(x) ((x) << S_PCIE_FW_PF_DEVLOG_MEMTYPE) #define G_PCIE_FW_PF_DEVLOG_MEMTYPE(x) \ (((x) >> S_PCIE_FW_PF_DEVLOG_MEMTYPE) & M_PCIE_FW_PF_DEVLOG_MEMTYPE) /****************************************************************************** * B I N A R Y H E A D E R F O R M A T **********************************************/ /* * firmware binary header format */ struct fw_hdr { __u8 ver; __u8 chip; /* terminator chip family */ __be16 len512; /* bin length in units of 512-bytes */ __be32 fw_ver; /* firmware version */ __be32 tp_microcode_ver; /* tcp processor microcode version */ __u8 intfver_nic; __u8 
intfver_vnic; __u8 intfver_ofld; __u8 intfver_ri; __u8 intfver_iscsipdu; __u8 intfver_iscsi; __u8 intfver_fcoepdu; __u8 intfver_fcoe; __u32 reserved2; __u32 reserved3; __be32 magic; /* runtime or bootstrap fw */ __be32 flags; __be32 reserved6[23]; }; enum fw_hdr_chip { FW_HDR_CHIP_T4, FW_HDR_CHIP_T5, FW_HDR_CHIP_T6 }; #define S_FW_HDR_FW_VER_MAJOR 24 #define M_FW_HDR_FW_VER_MAJOR 0xff #define V_FW_HDR_FW_VER_MAJOR(x) \ ((x) << S_FW_HDR_FW_VER_MAJOR) #define G_FW_HDR_FW_VER_MAJOR(x) \ (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR) #define S_FW_HDR_FW_VER_MINOR 16 #define M_FW_HDR_FW_VER_MINOR 0xff #define V_FW_HDR_FW_VER_MINOR(x) \ ((x) << S_FW_HDR_FW_VER_MINOR) #define G_FW_HDR_FW_VER_MINOR(x) \ (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR) #define S_FW_HDR_FW_VER_MICRO 8 #define M_FW_HDR_FW_VER_MICRO 0xff #define V_FW_HDR_FW_VER_MICRO(x) \ ((x) << S_FW_HDR_FW_VER_MICRO) #define G_FW_HDR_FW_VER_MICRO(x) \ (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO) #define S_FW_HDR_FW_VER_BUILD 0 #define M_FW_HDR_FW_VER_BUILD 0xff #define V_FW_HDR_FW_VER_BUILD(x) \ ((x) << S_FW_HDR_FW_VER_BUILD) #define G_FW_HDR_FW_VER_BUILD(x) \ (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD) enum { T4FW_VERSION_MAJOR = 1, T4FW_VERSION_MINOR = 25, T4FW_VERSION_MICRO = 0, T4FW_VERSION_BUILD = 40, T5FW_VERSION_MAJOR = 1, T5FW_VERSION_MINOR = 25, T5FW_VERSION_MICRO = 0, T5FW_VERSION_BUILD = 40, T6FW_VERSION_MAJOR = 1, T6FW_VERSION_MINOR = 25, T6FW_VERSION_MICRO = 0, T6FW_VERSION_BUILD = 40, }; enum { /* T4 */ T4FW_HDR_INTFVER_NIC = 0x00, T4FW_HDR_INTFVER_VNIC = 0x00, T4FW_HDR_INTFVER_OFLD = 0x00, T4FW_HDR_INTFVER_RI = 0x00, T4FW_HDR_INTFVER_ISCSIPDU= 0x00, T4FW_HDR_INTFVER_ISCSI = 0x00, T4FW_HDR_INTFVER_FCOEPDU = 0x00, T4FW_HDR_INTFVER_FCOE = 0x00, /* T5 */ T5FW_HDR_INTFVER_NIC = 0x00, T5FW_HDR_INTFVER_VNIC = 0x00, T5FW_HDR_INTFVER_OFLD = 0x00, T5FW_HDR_INTFVER_RI = 0x00, T5FW_HDR_INTFVER_ISCSIPDU= 0x00, T5FW_HDR_INTFVER_ISCSI = 0x00, 
T5FW_HDR_INTFVER_FCOEPDU= 0x00, T5FW_HDR_INTFVER_FCOE = 0x00, /* T6 */ T6FW_HDR_INTFVER_NIC = 0x00, T6FW_HDR_INTFVER_VNIC = 0x00, T6FW_HDR_INTFVER_OFLD = 0x00, T6FW_HDR_INTFVER_RI = 0x00, T6FW_HDR_INTFVER_ISCSIPDU= 0x00, T6FW_HDR_INTFVER_ISCSI = 0x00, T6FW_HDR_INTFVER_FCOEPDU= 0x00, T6FW_HDR_INTFVER_FCOE = 0x00, }; #define FW_VERSION32(MAJOR, MINOR, MICRO, BUILD) ( \ V_FW_HDR_FW_VER_MAJOR(MAJOR) | V_FW_HDR_FW_VER_MINOR(MINOR) | \ V_FW_HDR_FW_VER_MICRO(MICRO) | V_FW_HDR_FW_VER_BUILD(BUILD)) enum { FW_HDR_MAGIC_RUNTIME = 0x00000000, FW_HDR_MAGIC_BOOTSTRAP = 0x626f6f74, }; enum fw_hdr_flags { FW_HDR_FLAGS_RESET_HALT = 0x00000001, }; /* * External PHY firmware binary header format */ struct fw_ephy_hdr { __u8 ver; __u8 reserved; __be16 len512; /* bin length in units of 512-bytes */ __be32 magic; __be16 vendor_id; __be16 device_id; __be32 version; __be32 reserved1[4]; }; enum { FW_EPHY_HDR_MAGIC = 0x65706879, }; struct fw_ifconf_dhcp_info { __be32 addr; __be32 mask; __be16 vlanid; __be16 mtu; __be32 gw; __u8 op; __u8 len; __u8 data[270]; }; struct fw_ifconf_ping_info { __be16 ping_pldsize; }; #endif /* _T4FW_INTERFACE_H_ */ diff --git a/sys/dev/cxgbe/t4_filter.c b/sys/dev/cxgbe/t4_filter.c index 65a87f4c4163..1e0269fcd5c0 100644 --- a/sys/dev/cxgbe/t4_filter.c +++ b/sys/dev/cxgbe/t4_filter.c @@ -1,1964 +1,2002 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018 Chelsio Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "common/t4_tcb.h" #include "t4_l2t.h" #include "t4_smt.h" struct filter_entry { LIST_ENTRY(filter_entry) link_4t; LIST_ENTRY(filter_entry) link_tid; uint32_t valid:1; /* filter allocated and valid */ uint32_t locked:1; /* filter is administratively locked or busy */ uint32_t pending:1; /* filter action is pending firmware reply */ int tid; /* tid of the filter TCB */ struct l2t_entry *l2te; /* L2 table entry for DMAC rewrite */ struct smt_entry *smt; /* SMT entry for SMAC rewrite */ struct t4_filter_specification fs; }; static void free_filter_resources(struct filter_entry *); static int get_tcamfilter(struct adapter *, struct t4_filter *); static int get_hashfilter(struct adapter *, struct t4_filter *); static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t, struct l2t_entry *, struct smt_entry *); static int del_hashfilter(struct adapter *, struct t4_filter *); static int 
configure_hashfilter_tcb(struct adapter *, struct filter_entry *); static inline bool separate_hpfilter_region(struct adapter *sc) { return (chip_id(sc) >= CHELSIO_T6); } static inline uint32_t hf_hashfn_4t(struct t4_filter_specification *fs) { struct t4_filter_tuple *ft = &fs->val; uint32_t hash; if (fs->type) { /* IPv6 */ hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT); hash = fnv_32_buf(&ft->dip[0], 16, hash); } else { hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT); hash = fnv_32_buf(&ft->dip[0], 4, hash); } hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash); hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash); return (hash); } static inline uint32_t hf_hashfn_tid(int tid) { return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT)); } static int alloc_hftid_hash(struct tid_info *t, int flags) { int n; MPASS(t->ntids > 0); MPASS(t->hftid_hash_4t == NULL); MPASS(t->hftid_hash_tid == NULL); n = max(t->ntids / 1024, 16); t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags); if (t->hftid_hash_4t == NULL) return (ENOMEM); t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask, flags); if (t->hftid_hash_tid == NULL) { hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask); t->hftid_hash_4t = NULL; return (ENOMEM); } mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF); cv_init(&t->hftid_cv, "t4hfcv"); return (0); } void free_hftid_hash(struct tid_info *t) { struct filter_entry *f, *ftmp; LIST_HEAD(, filter_entry) *head; int i; #ifdef INVARIANTS int n = 0; #endif if (t->tids_in_use > 0) { /* Remove everything from the tid hash. */ head = t->hftid_hash_tid; for (i = 0; i <= t->hftid_tid_mask; i++) { LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) { LIST_REMOVE(f, link_tid); } } /* Remove and then free each filter in the 4t hash. */ head = t->hftid_hash_4t; for (i = 0; i <= t->hftid_4t_mask; i++) { LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) { #ifdef INVARIANTS n += f->fs.type ? 
2 : 1; #endif LIST_REMOVE(f, link_4t); free(f, M_CXGBE); } } MPASS(t->tids_in_use == n); t->tids_in_use = 0; } if (t->hftid_hash_4t) { hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask); t->hftid_hash_4t = NULL; } if (t->hftid_hash_tid) { hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask); t->hftid_hash_tid = NULL; } if (mtx_initialized(&t->hftid_lock)) { mtx_destroy(&t->hftid_lock); cv_destroy(&t->hftid_cv); } } static void insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash) { struct tid_info *t = &sc->tids; LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t; MPASS(head != NULL); if (hash == 0) hash = hf_hashfn_4t(&f->fs); LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t); atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1); } static void insert_hftid(struct adapter *sc, struct filter_entry *f) { struct tid_info *t = &sc->tids; LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid; uint32_t hash; MPASS(f->tid >= t->tid_base); MPASS(f->tid - t->tid_base < t->ntids); mtx_assert(&t->hftid_lock, MA_OWNED); hash = hf_hashfn_tid(f->tid); LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid); } static bool filter_eq(struct t4_filter_specification *fs1, struct t4_filter_specification *fs2) { int n; MPASS(fs1->hash && fs2->hash); if (fs1->type != fs2->type) return (false); n = fs1->type ? 16 : 4; if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) || bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) || fs1->val.sport != fs2->val.sport || fs1->val.dport != fs2->val.dport) return (false); /* - * We know the masks are the same because all hashfilter masks have to - * conform to the global tp->hash_filter_mask and the driver has - * verified that already. + * We know the masks are the same because all hashfilters conform to the + * global tp->filter_mask and the driver has verified that already. 
*/ if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) && fs1->val.vnic != fs2->val.vnic) return (false); if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan) return (false); if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx) return (false); if (fs1->mask.frag && fs1->val.frag != fs2->val.frag) return (false); if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype) return (false); if (fs1->mask.iport && fs1->val.iport != fs2->val.iport) return (false); if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe) return (false); if (fs1->mask.proto && fs1->val.proto != fs2->val.proto) return (false); if (fs1->mask.tos && fs1->val.tos != fs2->val.tos) return (false); if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype) return (false); return (true); } static struct filter_entry * lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash) { struct tid_info *t = &sc->tids; LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t; struct filter_entry *f; mtx_assert(&t->hftid_lock, MA_OWNED); MPASS(head != NULL); if (hash == 0) hash = hf_hashfn_4t(fs); LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) { if (filter_eq(&f->fs, fs)) return (f); } return (NULL); } static struct filter_entry * lookup_hftid(struct adapter *sc, int tid) { struct tid_info *t = &sc->tids; LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid; struct filter_entry *f; uint32_t hash; mtx_assert(&t->hftid_lock, MA_OWNED); MPASS(head != NULL); hash = hf_hashfn_tid(tid); LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) { if (f->tid == tid) return (f); } return (NULL); } static void remove_hf(struct adapter *sc, struct filter_entry *f) { struct tid_info *t = &sc->tids; mtx_assert(&t->hftid_lock, MA_OWNED); LIST_REMOVE(f, link_4t); atomic_subtract_int(&t->tids_in_use, f->fs.type ? 
2 : 1); } static void remove_hftid(struct adapter *sc, struct filter_entry *f) { #ifdef INVARIANTS struct tid_info *t = &sc->tids; mtx_assert(&t->hftid_lock, MA_OWNED); #endif LIST_REMOVE(f, link_tid); } -static uint32_t +/* + * Input: driver's 32b filter mode. + * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input. + */ +static uint16_t mode_to_fconf(uint32_t mode) { uint32_t fconf = 0; if (mode & T4_FILTER_IP_FRAGMENT) fconf |= F_FRAGMENTATION; if (mode & T4_FILTER_MPS_HIT_TYPE) fconf |= F_MPSHITTYPE; if (mode & T4_FILTER_MAC_IDX) fconf |= F_MACMATCH; if (mode & T4_FILTER_ETH_TYPE) fconf |= F_ETHERTYPE; if (mode & T4_FILTER_IP_PROTO) fconf |= F_PROTOCOL; if (mode & T4_FILTER_IP_TOS) fconf |= F_TOS; if (mode & T4_FILTER_VLAN) fconf |= F_VLAN; if (mode & T4_FILTER_VNIC) fconf |= F_VNIC_ID; if (mode & T4_FILTER_PORT) fconf |= F_PORT; if (mode & T4_FILTER_FCoE) fconf |= F_FCOE; return (fconf); } -static uint32_t +/* + * Input: driver's 32b filter mode. + * Returns: hardware vnic mode (ingress config) matching the input. + */ +static int mode_to_iconf(uint32_t mode) { + if ((mode & T4_FILTER_VNIC) == 0) + return (-1); /* ingress config doesn't matter. 
*/ if (mode & T4_FILTER_IC_VNIC) - return (F_VNIC); - return (0); + return (FW_VNIC_MODE_PF_VF); + else if (mode & T4_FILTER_IC_ENCAP) + return (FW_VNIC_MODE_ENCAP_EN); + else + return (FW_VNIC_MODE_OUTER_VLAN); } static int check_fspec_against_fconf_iconf(struct adapter *sc, struct t4_filter_specification *fs) { struct tp_params *tpp = &sc->params.tp; uint32_t fconf = 0; if (fs->val.frag || fs->mask.frag) fconf |= F_FRAGMENTATION; if (fs->val.matchtype || fs->mask.matchtype) fconf |= F_MPSHITTYPE; if (fs->val.macidx || fs->mask.macidx) fconf |= F_MACMATCH; if (fs->val.ethtype || fs->mask.ethtype) fconf |= F_ETHERTYPE; if (fs->val.proto || fs->mask.proto) fconf |= F_PROTOCOL; if (fs->val.tos || fs->mask.tos) fconf |= F_TOS; if (fs->val.vlan_vld || fs->mask.vlan_vld) fconf |= F_VLAN; if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { - fconf |= F_VNIC_ID; - if (tpp->ingress_config & F_VNIC) + if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN) return (EINVAL); + fconf |= F_VNIC_ID; } if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { + if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF) + return (EINVAL); fconf |= F_VNIC_ID; - if ((tpp->ingress_config & F_VNIC) == 0) + } + +#ifdef notyet + if (fs->val.encap_vld || fs->mask.encap_vld) { + if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN); return (EINVAL); + fconf |= F_VNIC_ID; } +#endif if (fs->val.iport || fs->mask.iport) fconf |= F_PORT; if (fs->val.fcoe || fs->mask.fcoe) fconf |= F_FCOE; - if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) + if ((tpp->filter_mode | fconf) != tpp->filter_mode) return (E2BIG); return (0); } +/* + * Input: hardware filter configuration (filter mode/mask, ingress config). + * Input: driver's 32b filter mode matching the input. 
+ */ +static uint32_t +fconf_to_mode(uint16_t hwmode, int vnic_mode) +{ + uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | + T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; + + if (hwmode & F_FRAGMENTATION) + mode |= T4_FILTER_IP_FRAGMENT; + if (hwmode & F_MPSHITTYPE) + mode |= T4_FILTER_MPS_HIT_TYPE; + if (hwmode & F_MACMATCH) + mode |= T4_FILTER_MAC_IDX; + if (hwmode & F_ETHERTYPE) + mode |= T4_FILTER_ETH_TYPE; + if (hwmode & F_PROTOCOL) + mode |= T4_FILTER_IP_PROTO; + if (hwmode & F_TOS) + mode |= T4_FILTER_IP_TOS; + if (hwmode & F_VLAN) + mode |= T4_FILTER_VLAN; + if (hwmode & F_VNIC_ID) + mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */ + if (hwmode & F_PORT) + mode |= T4_FILTER_PORT; + if (hwmode & F_FCOE) + mode |= T4_FILTER_FCoE; + + switch (vnic_mode) { + case FW_VNIC_MODE_PF_VF: + mode |= T4_FILTER_IC_VNIC; + break; + case FW_VNIC_MODE_ENCAP_EN: + mode |= T4_FILTER_IC_ENCAP; + break; + case FW_VNIC_MODE_OUTER_VLAN: + default: + break; + } + + return (mode); +} + int get_filter_mode(struct adapter *sc, uint32_t *mode) { struct tp_params *tp = &sc->params.tp; - uint64_t mask; + uint16_t filter_mode; - /* Non-zero incoming value in mode means "hashfilter mode". */ - mask = *mode ? tp->hash_filter_mask : UINT64_MAX; + /* Filter mask must comply with the global filter mode. 
*/ + MPASS((tp->filter_mode | tp->filter_mask) == tp->filter_mode); - /* Always */ - *mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | - T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; - -#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit) do { \ - if (tp->vlan_pri_map & (fconf_bit)) { \ - MPASS(tp->field_shift >= 0); \ - if ((mask >> tp->field_shift & field_mask) == field_mask) \ - *mode |= (mode_bit); \ - } \ -} while (0) - - CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT); - CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE); - CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX); - CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE); - CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO); - CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS); - CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN); - CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID , T4_FILTER_VNIC); - if (tp->ingress_config & F_VNIC) - *mode |= T4_FILTER_IC_VNIC; - CHECK_FIELD(F_PORT, port_shift, M_FT_PORT , T4_FILTER_PORT); - CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE , T4_FILTER_FCoE); -#undef CHECK_FIELD + /* Non-zero incoming value in mode means "hashfilter mode". */ + filter_mode = *mode ? tp->filter_mask : tp->filter_mode; + *mode = fconf_to_mode(filter_mode, tp->vnic_mode); return (0); } int set_filter_mode(struct adapter *sc, uint32_t mode) { - struct tp_params *tpp = &sc->params.tp; - uint32_t fconf, iconf; - int rc; + struct tp_params *tp = &sc->params.tp; + int rc, iconf; + uint16_t fconf; iconf = mode_to_iconf(mode); - if ((iconf ^ tpp->ingress_config) & F_VNIC) { - /* - * For now we just complain if A_TP_INGRESS_CONFIG is not - * already set to the correct value for the requested filter - * mode. It's not clear if it's safe to write to this register - * on the fly. 
(And we trust the cached value of the register). - * - * check_fspec_against_fconf_iconf and other code that looks at - * tp->vlan_pri_map and tp->ingress_config needs to be reviewed - * thorougly before allowing dynamic filter mode changes. - */ - return (EBUSY); - } - fconf = mode_to_fconf(mode); + if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode) + return (0); /* Nothing to do */ - rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, - "t4setfm"); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setfm"); if (rc) return (rc); - if (sc->tids.ftids_in_use > 0 || sc->tids.hpftids_in_use > 0) { + if (sc->tids.ftids_in_use > 0 || /* TCAM filters active */ + sc->tids.hpftids_in_use > 0 || /* hi-pri TCAM filters active */ + sc->tids.tids_in_use > 0) { /* TOE or hashfilters active */ rc = EBUSY; goto done; } #ifdef TCP_OFFLOAD if (uld_active(sc, ULD_TOM)) { rc = EBUSY; goto done; } #endif - rc = -t4_set_filter_mode(sc, fconf, true); + /* Note that filter mask will get clipped to the new filter mode. 
*/ + rc = -t4_set_filter_cfg(sc, fconf, -1, iconf); done: - end_synchronized_op(sc, LOCK_HELD); + end_synchronized_op(sc, 0); return (rc); } static inline uint64_t get_filter_hits(struct adapter *sc, uint32_t tid) { uint32_t tcb_addr; tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE; if (is_t4(sc)) { uint64_t hits; read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); return (be64toh(hits)); } else { uint32_t hits; read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); return (be32toh(hits)); } } int get_filter(struct adapter *sc, struct t4_filter *t) { if (t->fs.hash) return (get_hashfilter(sc, t)); else return (get_tcamfilter(sc, t)); } static int set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te, struct smt_entry *smt) { struct filter_entry *f; struct fw_filter2_wr *fwr; u_int vnic_vld, vnic_vld_mask; struct wrq_cookie cookie; int i, rc, busy, locked; u_int tid; const int ntids = t->fs.type ? 4 : 1; MPASS(!t->fs.hash); /* Already validated against fconf, iconf */ MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0); MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0); if (separate_hpfilter_region(sc) && t->fs.prio) { MPASS(t->idx < sc->tids.nhpftids); f = &sc->tids.hpftid_tab[t->idx]; tid = sc->tids.hpftid_base + t->idx; } else { MPASS(t->idx < sc->tids.nftids); f = &sc->tids.ftid_tab[t->idx]; tid = sc->tids.ftid_base + t->idx; } rc = busy = locked = 0; mtx_lock(&sc->tids.ftid_lock); for (i = 0; i < ntids; i++) { busy += f[i].pending + f[i].valid; locked += f[i].locked; } if (locked > 0) rc = EPERM; else if (busy > 0) rc = EBUSY; else { int len16; if (sc->params.filter2_wr_support) len16 = howmany(sizeof(struct fw_filter2_wr), 16); else len16 = howmany(sizeof(struct fw_filter_wr), 16); fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie); if (__predict_false(fwr == NULL)) rc = ENOMEM; else { f->pending = 1; if (separate_hpfilter_region(sc) && t->fs.prio) sc->tids.hpftids_in_use++; else sc->tids.ftids_in_use++; 
} } mtx_unlock(&sc->tids.ftid_lock); if (rc != 0) return (rc); /* * Can't fail now. A set-filter WR will definitely be sent. */ f->tid = tid; f->fs = t->fs; f->l2te = l2te; f->smt = smt; if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld) vnic_vld = 1; else vnic_vld = 0; if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld) vnic_vld_mask = 1; else vnic_vld_mask = 0; bzero(fwr, sizeof(*fwr)); if (sc->params.filter2_wr_support) fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR)); else fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); fwr->tid_to_iq = htobe32(V_FW_FILTER_WR_TID(f->tid) | V_FW_FILTER_WR_RQTYPE(f->fs.type) | V_FW_FILTER_WR_NOREPLY(0) | V_FW_FILTER_WR_IQ(f->fs.iq)); fwr->del_filter_to_l2tix = htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | V_FW_FILTER_WR_DMAC(f->fs.newdmac) | V_FW_FILTER_WR_SMAC(f->fs.newsmac) | V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) | V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | V_FW_FILTER_WR_TXCHAN(f->fs.eport) | V_FW_FILTER_WR_PRIO(f->fs.prio) | V_FW_FILTER_WR_L2TIX(f->l2te ? 
f->l2te->idx : 0)); fwr->ethtype = htobe16(f->fs.val.ethtype); fwr->ethtypem = htobe16(f->fs.mask.ethtype); fwr->frag_to_ovlan_vldm = (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); fwr->smac_sel = 0; fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); fwr->maci_to_matchtypem = htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | V_FW_FILTER_WR_PORT(f->fs.val.iport) | V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); fwr->ptcl = f->fs.val.proto; fwr->ptclm = f->fs.mask.proto; fwr->ttyp = f->fs.val.tos; fwr->ttypm = f->fs.mask.tos; fwr->ivlan = htobe16(f->fs.val.vlan); fwr->ivlanm = htobe16(f->fs.mask.vlan); fwr->ovlan = htobe16(f->fs.val.vnic); fwr->ovlanm = htobe16(f->fs.mask.vnic); bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); fwr->lp = htobe16(f->fs.val.dport); fwr->lpm = htobe16(f->fs.mask.dport); fwr->fp = htobe16(f->fs.val.sport); fwr->fpm = htobe16(f->fs.mask.sport); /* sma = 0 tells the fw to use SMAC_SEL for source MAC address */ bzero(fwr->sma, sizeof (fwr->sma)); if (sc->params.filter2_wr_support) { fwr->filter_type_swapmac = V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac); fwr->natmode_to_ulp_type = V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ? 
ULP_MODE_TCPDDP : ULP_MODE_NONE) | V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) | V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode); memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip)); memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip)); fwr->newlport = htobe16(f->fs.nat_dport); fwr->newfport = htobe16(f->fs.nat_sport); fwr->natseqcheck = htobe32(f->fs.nat_seq_chk); } commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie); /* Wait for response. */ mtx_lock(&sc->tids.ftid_lock); for (;;) { if (f->pending == 0) { rc = f->valid ? 0 : EIO; break; } if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) { rc = EINPROGRESS; break; } } mtx_unlock(&sc->tids.ftid_lock); return (rc); } static int hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs, uint64_t *ftuple) { struct tp_params *tp = &sc->params.tp; - uint64_t fmask; + uint16_t fmask; *ftuple = fmask = 0; /* * Initialize each of the fields which we care about which are present * in the Compressed Filter Tuple. */ if (tp->vlan_shift >= 0 && fs->mask.vlan) { - *ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift; - fmask |= M_FT_VLAN << tp->vlan_shift; + *ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) << + tp->vlan_shift; + fmask |= F_VLAN; } if (tp->port_shift >= 0 && fs->mask.iport) { *ftuple |= (uint64_t)fs->val.iport << tp->port_shift; - fmask |= M_FT_PORT << tp->port_shift; + fmask |= F_PORT; } if (tp->protocol_shift >= 0 && fs->mask.proto) { *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift; - fmask |= M_FT_PROTOCOL << tp->protocol_shift; + fmask |= F_PROTOCOL; } if (tp->tos_shift >= 0 && fs->mask.tos) { *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift; - fmask |= M_FT_TOS << tp->tos_shift; + fmask |= F_TOS; } if (tp->vnic_shift >= 0 && fs->mask.vnic) { - /* F_VNIC in ingress config was already validated. */ - if (tp->ingress_config & F_VNIC) + /* vnic_mode was already validated. 
*/ + if (tp->vnic_mode == FW_VNIC_MODE_PF_VF) MPASS(fs->mask.pfvf_vld); - else + else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN) MPASS(fs->mask.ovlan_vld); - +#ifdef notyet + else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN) + MPASS(fs->mask.encap_vld); +#endif *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift; - fmask |= M_FT_VNIC_ID << tp->vnic_shift; + fmask |= F_VNIC_ID; } if (tp->macmatch_shift >= 0 && fs->mask.macidx) { *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift; - fmask |= M_FT_MACMATCH << tp->macmatch_shift; + fmask |= F_MACMATCH; } if (tp->ethertype_shift >= 0 && fs->mask.ethtype) { *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift; - fmask |= M_FT_ETHERTYPE << tp->ethertype_shift; + fmask |= F_ETHERTYPE; } if (tp->matchtype_shift >= 0 && fs->mask.matchtype) { *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift; - fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift; + fmask |= F_MPSHITTYPE; } if (tp->frag_shift >= 0 && fs->mask.frag) { *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift; - fmask |= M_FT_FRAGMENTATION << tp->frag_shift; + fmask |= F_FRAGMENTATION; } if (tp->fcoe_shift >= 0 && fs->mask.fcoe) { *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift; - fmask |= M_FT_FCOE << tp->fcoe_shift; + fmask |= F_FCOE; } - /* A hashfilter must conform to the filterMask. */ - if (fmask != tp->hash_filter_mask) + /* A hashfilter must conform to the hardware filter mask. */ + if (fmask != tp->filter_mask) return (EINVAL); return (0); } static bool is_4tuple_specified(struct t4_filter_specification *fs) { int i; const int n = fs->type ? 
16 : 4; if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff) return (false); for (i = 0; i < n; i++) { if (fs->mask.sip[i] != 0xff) return (false); if (fs->mask.dip[i] != 0xff) return (false); } return (true); } int set_filter(struct adapter *sc, struct t4_filter *t) { struct tid_info *ti = &sc->tids; struct l2t_entry *l2te = NULL; struct smt_entry *smt = NULL; uint64_t ftuple; int rc; /* * Basic filter checks first. */ if (t->fs.hash) { if (!is_hashfilter(sc) || ti->ntids == 0) return (ENOTSUP); /* Hardware, not user, selects a tid for hashfilters. */ if (t->idx != (uint32_t)-1) return (EINVAL); /* T5 can't count hashfilter hits. */ if (is_t5(sc) && t->fs.hitcnts) return (EINVAL); if (!is_4tuple_specified(&t->fs)) return (EINVAL); rc = hashfilter_ntuple(sc, &t->fs, &ftuple); if (rc != 0) return (rc); } else { if (separate_hpfilter_region(sc) && t->fs.prio) { if (ti->nhpftids == 0) return (ENOTSUP); if (t->idx >= ti->nhpftids) return (EINVAL); } else { if (ti->nftids == 0) return (ENOTSUP); if (t->idx >= ti->nftids) return (EINVAL); } /* IPv6 filter idx must be 4 aligned */ if (t->fs.type == 1 && ((t->idx & 0x3) || t->idx + 4 >= ti->nftids)) return (EINVAL); } /* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */ if (is_t4(sc) && t->fs.action == FILTER_SWITCH && (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE || t->fs.swapmac || t->fs.nat_mode)) return (ENOTSUP); if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports) return (EINVAL); if (t->fs.val.iport >= sc->params.nports) return (EINVAL); /* Can't specify an iqid/rss_info if not steering. */ if (!t->fs.dirsteer && !t->fs.dirsteerhash && !t->fs.maskhash && t->fs.iq) return (EINVAL); /* Validate against the global filter mode and ingress config */ rc = check_fspec_against_fconf_iconf(sc, &t->fs); if (rc != 0) return (rc); /* * Basic checks passed. Make sure the queues and tid tables are setup. 
*/ rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); if (rc) return (rc); if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_full_init(sc)) != 0)) { end_synchronized_op(sc, 0); return (rc); } if (t->fs.hash) { if (__predict_false(ti->hftid_hash_4t == NULL)) { rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT); if (rc != 0) goto done; } } else if (separate_hpfilter_region(sc) && t->fs.prio && __predict_false(ti->hpftid_tab == NULL)) { MPASS(ti->nhpftids != 0); KASSERT(ti->hpftids_in_use == 0, ("%s: no memory allocated but hpftids_in_use is %u", __func__, ti->hpftids_in_use)); ti->hpftid_tab = malloc(sizeof(struct filter_entry) * ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO); if (ti->hpftid_tab == NULL) { rc = ENOMEM; goto done; } if (!mtx_initialized(&sc->tids.ftid_lock)) { mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF); cv_init(&ti->ftid_cv, "t4fcv"); } } else if (__predict_false(ti->ftid_tab == NULL)) { MPASS(ti->nftids != 0); KASSERT(ti->ftids_in_use == 0, ("%s: no memory allocated but ftids_in_use is %u", __func__, ti->ftids_in_use)); ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids, M_CXGBE, M_NOWAIT | M_ZERO); if (ti->ftid_tab == NULL) { rc = ENOMEM; goto done; } if (!mtx_initialized(&sc->tids.ftid_lock)) { mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF); cv_init(&ti->ftid_cv, "t4fcv"); } } done: end_synchronized_op(sc, 0); if (rc != 0) return (rc); /* * Allocate L2T entry, SMT entry, etc. */ if (t->fs.newdmac || t->fs.newvlan) { /* This filter needs an L2T entry; allocate one. */ l2te = t4_l2t_alloc_switching(sc, t->fs.vlan, t->fs.eport, t->fs.dmac); if (__predict_false(l2te == NULL)) { rc = EAGAIN; goto error; } } if (t->fs.newsmac) { /* This filter needs an SMT entry; allocate one. 
*/ smt = t4_smt_alloc_switching(sc->smt, t->fs.smac); if (__predict_false(smt == NULL)) { rc = EAGAIN; goto error; } rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac); if (rc) goto error; } if (t->fs.hash) rc = set_hashfilter(sc, t, ftuple, l2te, smt); else rc = set_tcamfilter(sc, t, l2te, smt); if (rc != 0 && rc != EINPROGRESS) { error: if (l2te) t4_l2t_release(l2te); if (smt) t4_smt_release(smt); } return (rc); } static int del_tcamfilter(struct adapter *sc, struct t4_filter *t) { struct filter_entry *f; struct fw_filter_wr *fwr; struct wrq_cookie cookie; int rc, nfilters; #ifdef INVARIANTS u_int tid_base; #endif mtx_lock(&sc->tids.ftid_lock); if (separate_hpfilter_region(sc) && t->fs.prio) { nfilters = sc->tids.nhpftids; f = sc->tids.hpftid_tab; #ifdef INVARIANTS tid_base = sc->tids.hpftid_base; #endif } else { nfilters = sc->tids.nftids; f = sc->tids.ftid_tab; #ifdef INVARIANTS tid_base = sc->tids.ftid_base; #endif } MPASS(f != NULL); /* Caller checked this. */ if (t->idx >= nfilters) { rc = EINVAL; goto done; } f += t->idx; if (f->locked) { rc = EPERM; goto done; } if (f->pending) { rc = EBUSY; goto done; } if (f->valid == 0) { rc = EINVAL; goto done; } MPASS(f->tid == tid_base + t->idx); fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie); if (fwr == NULL) { rc = ENOMEM; goto done; } bzero(fwr, sizeof (*fwr)); t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id); f->pending = 1; commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie); t->fs = f->fs; /* extra info for the caller */ for (;;) { if (f->pending == 0) { rc = f->valid ? EIO : 0; break; } if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) { rc = EINPROGRESS; break; } } done: mtx_unlock(&sc->tids.ftid_lock); return (rc); } int del_filter(struct adapter *sc, struct t4_filter *t) { /* No filters possible if not initialized yet. */ if (!(sc->flags & FULL_INIT_DONE)) return (EINVAL); /* * The checks for tid tables ensure that the locks that del_* will reach * for are initialized. 
*/ if (t->fs.hash) { if (sc->tids.hftid_hash_4t != NULL) return (del_hashfilter(sc, t)); } else if (separate_hpfilter_region(sc) && t->fs.prio) { if (sc->tids.hpftid_tab != NULL) return (del_tcamfilter(sc, t)); } else { if (sc->tids.ftid_tab != NULL) return (del_tcamfilter(sc, t)); } return (EINVAL); } /* * Release secondary resources associated with the filter. */ static void free_filter_resources(struct filter_entry *f) { if (f->l2te) { t4_l2t_release(f->l2te); f->l2te = NULL; } if (f->smt) { t4_smt_release(f->smt); f->smt = NULL; } } static int set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask, uint64_t val, int no_reply) { struct wrq_cookie cookie; struct cpl_set_tcb_field *req; req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie); if (req == NULL) return (ENOMEM); bzero(req, sizeof(*req)); INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid); if (no_reply == 0) { req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) | V_NO_REPLY(0)); } else req->reply_ctrl = htobe16(V_NO_REPLY(1)); req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER)); req->mask = htobe64(mask); req->val = htobe64(val); commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie); return (0); } /* Set one of the t_flags bits in the TCB. 
*/ static inline int set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val, u_int no_reply) { return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos, (uint64_t)val << bit_pos, no_reply)); } int t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); u_int tid = GET_TID(rpl); u_int rc, idx; struct filter_entry *f; KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, rss->opcode)); if (is_hpftid(sc, tid)) { idx = tid - sc->tids.hpftid_base; f = &sc->tids.hpftid_tab[idx]; } else if (is_ftid(sc, tid)) { idx = tid - sc->tids.ftid_base; f = &sc->tids.ftid_tab[idx]; } else panic("%s: FW reply for invalid TID %d.", __func__, tid); MPASS(f->tid == tid); rc = G_COOKIE(rpl->cookie); mtx_lock(&sc->tids.ftid_lock); KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.", __func__, rc, tid)); switch(rc) { case FW_FILTER_WR_FLT_ADDED: /* set-filter succeeded */ f->valid = 1; if (f->fs.newsmac) { MPASS(f->smt != NULL); set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1); set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL, V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx), 1); /* XXX: wait for reply to TCB update before !pending */ } break; case FW_FILTER_WR_FLT_DELETED: /* del-filter succeeded */ MPASS(f->valid == 1); f->valid = 0; /* Fall through */ case FW_FILTER_WR_SMT_TBL_FULL: /* set-filter failed due to lack of SMT space. */ MPASS(f->valid == 0); free_filter_resources(f); if (separate_hpfilter_region(sc) && f->fs.prio) sc->tids.hpftids_in_use--; else sc->tids.ftids_in_use--; break; case FW_FILTER_WR_SUCCESS: case FW_FILTER_WR_EINVAL: default: panic("%s: unexpected reply %d for filter[%d].", __func__, rc, idx); } f->pending = 0; cv_broadcast(&sc->tids.ftid_cv); mtx_unlock(&sc->tids.ftid_lock); return (0); } /* * This is the reply to the Active Open that created the filter. 
Additional TCB * updates may be required to complete the filter configuration. */ int t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1); u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status))); u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status)); struct filter_entry *f = lookup_atid(sc, atid); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); mtx_lock(&sc->tids.hftid_lock); KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f)); KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.", __func__, f, f->tid)); if (status == CPL_ERR_NONE) { f->tid = GET_TID(cpl); MPASS(lookup_hftid(sc, f->tid) == NULL); insert_hftid(sc, f); /* * Leave the filter pending until it is fully set up, which will * be indicated by the reply to the last TCB update. No need to * unblock the ioctl thread either. */ if (configure_hashfilter_tcb(sc, f) == EINPROGRESS) goto done; f->valid = 1; f->pending = 0; } else { /* provide errno instead of tid to ioctl */ f->tid = act_open_rpl_status_to_errno(status); f->valid = 0; f->pending = 0; if (act_open_has_tid(status)) release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]); free_filter_resources(f); remove_hf(sc, f); if (f->locked == 0) free(f, M_CXGBE); } cv_broadcast(&sc->tids.hftid_cv); done: mtx_unlock(&sc->tids.hftid_lock); free_atid(sc, atid); return (0); } int t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); u_int tid = GET_TID(rpl); struct filter_entry *f; mtx_lock(&sc->tids.hftid_lock); f = lookup_hftid(sc, tid); KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__)); KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__, f, tid)); KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.", __func__, f, 
tid)); f->pending = 0; if (rpl->status == 0) { f->valid = 1; } else { f->tid = EIO; f->valid = 0; free_filter_resources(f); remove_hftid(sc, f); remove_hf(sc, f); release_tid(sc, tid, &sc->sge.ctrlq[0]); if (f->locked == 0) free(f, M_CXGBE); } cv_broadcast(&sc->tids.hftid_cv); mtx_unlock(&sc->tids.hftid_lock); return (0); } int t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); unsigned int tid = GET_TID(cpl); struct filter_entry *f; mtx_lock(&sc->tids.hftid_lock); f = lookup_hftid(sc, tid); KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__)); KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__, f, tid)); KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f, tid)); f->pending = 0; if (cpl->status == 0) { f->valid = 0; free_filter_resources(f); remove_hftid(sc, f); remove_hf(sc, f); release_tid(sc, tid, &sc->sge.ctrlq[0]); if (f->locked == 0) free(f, M_CXGBE); } cv_broadcast(&sc->tids.hftid_cv); mtx_unlock(&sc->tids.hftid_lock); return (0); } static int get_tcamfilter(struct adapter *sc, struct t4_filter *t) { int i, nfilters; struct filter_entry *f; u_int in_use; #ifdef INVARIANTS u_int tid_base; #endif MPASS(!t->fs.hash); if (separate_hpfilter_region(sc) && t->fs.prio) { nfilters = sc->tids.nhpftids; f = sc->tids.hpftid_tab; in_use = sc->tids.hpftids_in_use; #ifdef INVARIANTS tid_base = sc->tids.hpftid_base; #endif } else { nfilters = sc->tids.nftids; f = sc->tids.ftid_tab; in_use = sc->tids.ftids_in_use; #ifdef INVARIANTS tid_base = sc->tids.ftid_base; #endif } if (in_use == 0 || f == NULL || t->idx >= nfilters) { t->idx = 0xffffffff; return (0); } f += t->idx; mtx_lock(&sc->tids.ftid_lock); for (i = t->idx; i < nfilters; i++, f++) { if (f->valid) { MPASS(f->tid == tid_base + i); t->idx = i; t->l2tidx = f->l2te ? f->l2te->idx : 0; t->smtidx = f->smt ? 
f->smt->idx : 0; if (f->fs.hitcnts) t->hits = get_filter_hits(sc, f->tid); else t->hits = UINT64_MAX; t->fs = f->fs; goto done; } } t->idx = 0xffffffff; done: mtx_unlock(&sc->tids.ftid_lock); return (0); } static int get_hashfilter(struct adapter *sc, struct t4_filter *t) { struct tid_info *ti = &sc->tids; int tid; struct filter_entry *f; const int inv_tid = ti->ntids + ti->tid_base; MPASS(t->fs.hash); if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL || t->idx >= inv_tid) { t->idx = 0xffffffff; return (0); } if (t->idx < ti->tid_base) t->idx = ti->tid_base; mtx_lock(&ti->hftid_lock); for (tid = t->idx; tid < inv_tid; tid++) { f = lookup_hftid(sc, tid); if (f != NULL && f->valid) { t->idx = tid; t->l2tidx = f->l2te ? f->l2te->idx : 0; t->smtidx = f->smt ? f->smt->idx : 0; if (f->fs.hitcnts) t->hits = get_filter_hits(sc, tid); else t->hits = UINT64_MAX; t->fs = f->fs; goto done; } } t->idx = 0xffffffff; done: mtx_unlock(&ti->hftid_lock); return (0); } static void mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid, uint64_t ftuple, struct cpl_act_open_req6 *cpl) { struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl; struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl; /* Review changes to CPL after cpl_t6_act_open_req if this goes off. 
*/ MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6); MPASS(atid >= 0); if (chip_id(sc) == CHELSIO_T5) { INIT_TP_WR(cpl5, 0); } else { INIT_TP_WR(cpl6, 0); cpl6->rsvd2 = 0; cpl6->opt3 = 0; } OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) | V_TID_COOKIE(CPL_COOKIE_HASHFILTER))); cpl->local_port = htobe16(f->fs.val.dport); cpl->peer_port = htobe16(f->fs.val.sport); cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip); cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1); cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip); cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1); cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) | V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) | V_NO_CONG(f->fs.rpttid) | V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) | F_TCAM_BYPASS | F_NON_OFFLOAD); cpl6->params = htobe64(V_FILTER_TUPLE(ftuple)); cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) | V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) | V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID | F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) | V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) | V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1))); } static void mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid, uint64_t ftuple, struct cpl_act_open_req *cpl) { struct cpl_t5_act_open_req *cpl5 = (void *)cpl; struct cpl_t6_act_open_req *cpl6 = (void *)cpl; /* Review changes to CPL after cpl_t6_act_open_req if this goes off. 
*/ MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6); MPASS(atid >= 0); if (chip_id(sc) == CHELSIO_T5) { INIT_TP_WR(cpl5, 0); } else { INIT_TP_WR(cpl6, 0); cpl6->rsvd2 = 0; cpl6->opt3 = 0; } OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) | V_TID_COOKIE(CPL_COOKIE_HASHFILTER))); cpl->local_port = htobe16(f->fs.val.dport); cpl->peer_port = htobe16(f->fs.val.sport); cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 | f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24; cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 | f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24; cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) | V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) | V_NO_CONG(f->fs.rpttid) | V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) | F_TCAM_BYPASS | F_NON_OFFLOAD); cpl6->params = htobe64(V_FILTER_TUPLE(ftuple)); cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) | V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) | V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 
1 : 0) | F_T5_OPT_2_VALID | F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) | V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) | V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1))); } static int act_open_cpl_len16(struct adapter *sc, int isipv6) { int idx; static const int sz_table[3][2] = { { howmany(sizeof (struct cpl_act_open_req), 16), howmany(sizeof (struct cpl_act_open_req6), 16) }, { howmany(sizeof (struct cpl_t5_act_open_req), 16), howmany(sizeof (struct cpl_t5_act_open_req6), 16) }, { howmany(sizeof (struct cpl_t6_act_open_req), 16), howmany(sizeof (struct cpl_t6_act_open_req6), 16) }, }; MPASS(chip_id(sc) >= CHELSIO_T4); idx = min(chip_id(sc) - CHELSIO_T4, 2); return (sz_table[idx][!!isipv6]); } static int set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple, struct l2t_entry *l2te, struct smt_entry *smt) { void *wr; struct wrq_cookie cookie; struct filter_entry *f; int rc, atid = -1; uint32_t hash; MPASS(t->fs.hash); /* Already validated against fconf, iconf */ MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0); MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0); hash = hf_hashfn_4t(&t->fs); mtx_lock(&sc->tids.hftid_lock); if (lookup_hf(sc, &t->fs, hash) != NULL) { rc = EEXIST; goto done; } f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT); if (__predict_false(f == NULL)) { rc = ENOMEM; goto done; } f->fs = t->fs; f->l2te = l2te; f->smt = smt; atid = alloc_atid(sc, f); if (__predict_false(atid) == -1) { free(f, M_CXGBE); rc = EAGAIN; goto done; } MPASS(atid >= 0); wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type), &cookie); if (wr == NULL) { free_atid(sc, atid); free(f, M_CXGBE); rc = ENOMEM; goto done; } if (f->fs.type) mk_act_open_req6(sc, f, atid, ftuple, wr); else mk_act_open_req(sc, f, atid, ftuple, wr); f->locked = 1; /* ithread mustn't free f if ioctl is still around. 
*/ f->pending = 1; f->tid = -1; insert_hf(sc, f, hash); commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie); for (;;) { MPASS(f->locked); if (f->pending == 0) { if (f->valid) { rc = 0; f->locked = 0; t->idx = f->tid; } else { rc = f->tid; free(f, M_CXGBE); } break; } if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) { f->locked = 0; rc = EINPROGRESS; break; } } done: mtx_unlock(&sc->tids.hftid_lock); return (rc); } /* SET_TCB_FIELD sent as a ULP command looks like this */ #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \ sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core)) static void * mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask, uint64_t val, uint32_t tid, uint32_t qid) { struct ulptx_idata *ulpsc; struct cpl_set_tcb_field_core *req; ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16)); ulpsc = (struct ulptx_idata *)(ulpmc + 1); ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); ulpsc->len = htobe32(sizeof(*req)); req = (struct cpl_set_tcb_field_core *)(ulpsc + 1); OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid)); req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0)); req->mask = htobe64(mask); req->val = htobe64(val); ulpsc = (struct ulptx_idata *)(req + 1); if (LEN__SET_TCB_FIELD_ULP % 16) { ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); ulpsc->len = htobe32(0); return (ulpsc + 1); } return (ulpsc); } /* ABORT_REQ sent as a ULP command looks like this */ #define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \ sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core)) static void * mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid) { struct ulptx_idata *ulpsc; struct cpl_abort_req_core *req; ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16)); ulpsc = 
(struct ulptx_idata *)(ulpmc + 1); ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); ulpsc->len = htobe32(sizeof(*req)); req = (struct cpl_abort_req_core *)(ulpsc + 1); OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid)); req->rsvd0 = htonl(0); req->rsvd1 = 0; req->cmd = CPL_ABORT_NO_RST; ulpsc = (struct ulptx_idata *)(req + 1); if (LEN__ABORT_REQ_ULP % 16) { ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); ulpsc->len = htobe32(0); return (ulpsc + 1); } return (ulpsc); } /* ABORT_RPL sent as a ULP command looks like this */ #define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \ sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core)) static void * mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid) { struct ulptx_idata *ulpsc; struct cpl_abort_rpl_core *rpl; ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16)); ulpsc = (struct ulptx_idata *)(ulpmc + 1); ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); ulpsc->len = htobe32(sizeof(*rpl)); rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1); OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); rpl->rsvd0 = htonl(0); rpl->rsvd1 = 0; rpl->cmd = CPL_ABORT_NO_RST; ulpsc = (struct ulptx_idata *)(rpl + 1); if (LEN__ABORT_RPL_ULP % 16) { ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); ulpsc->len = htobe32(0); return (ulpsc + 1); } return (ulpsc); } static inline int del_hashfilter_wrlen(void) { return (sizeof(struct work_request_hdr) + roundup2(LEN__SET_TCB_FIELD_ULP, 16) + roundup2(LEN__ABORT_REQ_ULP, 16) + roundup2(LEN__ABORT_RPL_ULP, 16)); } static void mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid) { struct ulp_txpkt *ulpmc; INIT_ULPTX_WRH(wrh, wrlen, 0, 0); ulpmc = (struct ulp_txpkt *)(wrh + 1); ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO, V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0); ulpmc = mk_abort_req_ulp(ulpmc, tid); ulpmc = 
mk_abort_rpl_ulp(ulpmc, tid); } static int del_hashfilter(struct adapter *sc, struct t4_filter *t) { struct tid_info *ti = &sc->tids; void *wr; struct filter_entry *f; struct wrq_cookie cookie; int rc; const int wrlen = del_hashfilter_wrlen(); const int inv_tid = ti->ntids + ti->tid_base; MPASS(sc->tids.hftid_hash_4t != NULL); MPASS(sc->tids.ntids > 0); if (t->idx < sc->tids.tid_base || t->idx >= inv_tid) return (EINVAL); mtx_lock(&ti->hftid_lock); f = lookup_hftid(sc, t->idx); if (f == NULL || f->valid == 0) { rc = EINVAL; goto done; } MPASS(f->tid == t->idx); if (f->locked) { rc = EPERM; goto done; } if (f->pending) { rc = EBUSY; goto done; } wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie); if (wr == NULL) { rc = ENOMEM; goto done; } mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id); f->locked = 1; f->pending = 1; commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie); t->fs = f->fs; /* extra info for the caller */ for (;;) { MPASS(f->locked); if (f->pending == 0) { if (f->valid) { f->locked = 0; rc = EIO; } else { rc = 0; free(f, M_CXGBE); } break; } if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) { f->locked = 0; rc = EINPROGRESS; break; } } done: mtx_unlock(&ti->hftid_lock); return (rc); } #define WORD_MASK 0xffffffff static void set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip, const bool sip, const bool dp, const bool sp) { if (dip) { if (f->fs.type) { set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK, f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 | f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1); set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW + 1, WORD_MASK, f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 | f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1); set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW + 2, WORD_MASK, f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 | f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1); set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW + 3, WORD_MASK, f->fs.nat_dip[3] | f->fs.nat_dip[2] 
<< 8 | f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1); } else { set_tcb_field(sc, f->tid, W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK, f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 | f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1); } } if (sip) { if (f->fs.type) { set_tcb_field(sc, f->tid, W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK, f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 | f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1); set_tcb_field(sc, f->tid, W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK, f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 | f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1); set_tcb_field(sc, f->tid, W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK, f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 | f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1); set_tcb_field(sc, f->tid, W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK, f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 | f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1); } else { set_tcb_field(sc, f->tid, W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK, f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 | f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1); } } set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK, (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1); } /* * Returns EINPROGRESS to indicate that at least one TCB update was sent and the * last of the series of updates requested a reply. The reply informs the * driver that the filter is fully setup. 
*/
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;	/* # of TCB updates issued so far. */

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->pending);
	MPASS(f->valid == 0);

	/*
	 * NOTE(review): every set_tcb_tflag()/set_tcb_field() below passes 1
	 * as its last argument except the final TIMESTAMP update, which
	 * passes 0.  Given the contract described above (EINPROGRESS means
	 * the last update of the series requested a reply), that argument
	 * appears to be a "no reply" flag -- confirm against the definitions
	 * of set_tcb_tflag()/set_tcb_field().
	 */
	if (f->fs.newdmac) {
		/* Destination MAC rewrite. */
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		/* VLAN tag insert/rewrite. */
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
		updated++;
	}

	if (f->fs.newsmac) {
		/* Source MAC rewrite; the SMT entry must already be set up. */
		MPASS(f->smt != NULL);
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
		set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
		    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
		    1);
		updated++;
	}

	/*
	 * Program the NAT rewrite addresses/ports for this nat_mode.  The
	 * (dip, sip, dp, sp) argument combinations mirror the NAT_MODE_*
	 * definitions in t4_ioctl.h.
	 */
	switch(f->fs.nat_mode) {
	case NAT_MODE_NONE:
		break;
	case NAT_MODE_DIP:
		set_nat_params(sc, f, true, false, false, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP:
		set_nat_params(sc, f, true, false, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SIP:
		set_nat_params(sc, f, true, true, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SP:
		set_nat_params(sc, f, true, false, true, true);
		updated++;
		break;
	case NAT_MODE_SIP_SP:
		set_nat_params(sc, f, false, true, false, true);
		updated++;
		break;
	case NAT_MODE_DIP_SIP_SP:
		set_nat_params(sc, f, true, true, false, true);
		updated++;
		break;
	case NAT_MODE_ALL:
		set_nat_params(sc, f, true, true, true, true);
		updated++;
		break;
	default:
		MPASS(0);	/* should have been validated earlier */
		break;
	}

	if (f->fs.nat_seq_chk) {
		/* Sequence value to use for the NAT check (see t4_ioctl.h). */
		set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
		updated++;
	}

	if (is_t5(sc) && f->fs.action == FILTER_DROP) {
		/*
		 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
		 */
		set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
		    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
		updated++;
	}

	/*
	 * Enable switching after all secondary resources (L2T entry, SMT entry,
	 * etc.) are setup so that any switched packet will use correct
	 * values.
	 */
	if (f->fs.action == FILTER_SWITCH) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		/*
		 * Zero the TIMESTAMP/RTT_TS_RECENT_AGE fields (presumably the
		 * storage for the filter hit counts requested via hitcnts --
		 * confirm against the TCB layout) and, unlike all previous
		 * updates, request a reply for this last write of the series.
		 */
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	/* No TCB update was needed at all; the filter is usable as-is. */
	return (0);
}
diff --git a/sys/dev/cxgbe/t4_ioctl.h b/sys/dev/cxgbe/t4_ioctl.h
index 1daa4f5dfa27..4f0a71683ef0 100644
--- a/sys/dev/cxgbe/t4_ioctl.h
+++ b/sys/dev/cxgbe/t4_ioctl.h
@@ -1,432 +1,432 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* * $FreeBSD$ * */ #ifndef __T4_IOCTL_H__ #define __T4_IOCTL_H__ #include #include #include /* * Ioctl commands specific to this driver. */ enum { T4_GETREG = 0x40, /* read register */ T4_SETREG, /* write register */ T4_REGDUMP, /* dump of all registers */ T4_GET_FILTER_MODE, /* get global filter mode */ T4_SET_FILTER_MODE, /* set global filter mode */ T4_GET_FILTER, /* get information about a filter */ T4_SET_FILTER, /* program a filter */ T4_DEL_FILTER, /* delete a filter */ T4_GET_SGE_CONTEXT, /* get SGE context for a queue */ T4_LOAD_FW, /* flash firmware */ T4_GET_MEM, /* read memory */ T4_GET_I2C, /* read from i2c addressible device */ T4_CLEAR_STATS, /* clear a port's MAC statistics */ T4_SET_OFLD_POLICY, /* Set offload policy */ T4_SET_SCHED_CLASS, /* set sched class */ T4_SET_SCHED_QUEUE, /* set queue class */ T4_GET_TRACER, /* get information about a tracer */ T4_SET_TRACER, /* program a tracer */ T4_LOAD_CFG, /* copy a config file to card's flash */ T4_LOAD_BOOT, /* flash boot rom */ T4_LOAD_BOOTCFG, /* flash bootcfg */ T4_CUDBG_DUMP, /* debug dump of chip state */ }; struct t4_reg { uint32_t addr; uint32_t size; uint64_t val; }; #define T4_REGDUMP_SIZE (160 * 1024) #define T5_REGDUMP_SIZE (332 * 1024) struct t4_regdump { uint32_t version; uint32_t len; /* bytes */ uint32_t *data; }; struct t4_data { uint32_t len; uint8_t *data; }; struct t4_bootrom { uint32_t pf_offset; uint32_t pfidx_addr; uint32_t len; uint8_t *data; }; struct t4_i2c_data { uint8_t port_id; uint8_t dev_addr; uint8_t offset; uint8_t len; uint8_t data[8]; }; /* * A hardware filter is some valid combination of these. 
*/ #define T4_FILTER_IPv4 0x1 /* IPv4 packet */ #define T4_FILTER_IPv6 0x2 /* IPv6 packet */ #define T4_FILTER_IP_SADDR 0x4 /* Source IP address or network */ #define T4_FILTER_IP_DADDR 0x8 /* Destination IP address or network */ #define T4_FILTER_IP_SPORT 0x10 /* Source IP port */ #define T4_FILTER_IP_DPORT 0x20 /* Destination IP port */ #define T4_FILTER_FCoE 0x40 /* Fibre Channel over Ethernet packet */ #define T4_FILTER_PORT 0x80 /* Physical ingress port */ -#define T4_FILTER_VNIC 0x100 /* VNIC id or outer VLAN */ +#define T4_FILTER_VNIC 0x100 /* See the IC_* bits towards the end */ #define T4_FILTER_VLAN 0x200 /* VLAN ID */ #define T4_FILTER_IP_TOS 0x400 /* IPv4 TOS/IPv6 Traffic Class */ #define T4_FILTER_IP_PROTO 0x800 /* IP protocol */ #define T4_FILTER_ETH_TYPE 0x1000 /* Ethernet Type */ #define T4_FILTER_MAC_IDX 0x2000 /* MPS MAC address match index */ #define T4_FILTER_MPS_HIT_TYPE 0x4000 /* MPS match type */ #define T4_FILTER_IP_FRAGMENT 0x8000 /* IP fragment */ - -#define T4_FILTER_IC_VNIC 0x80000000 /* TP Ingress Config's F_VNIC - bit. It indicates whether - T4_FILTER_VNIC bit means VNIC - id (PF/VF) or outer VLAN. - 0 = oVLAN, 1 = VNIC */ +/* + * T4_FILTER_VNIC's real meaning depends on the ingress config. 
+ */ +#define T4_FILTER_IC_OVLAN 0 /* outer VLAN */ +#define T4_FILTER_IC_VNIC 0x80000000 /* VNIC id (PF/VF) */ +#define T4_FILTER_IC_ENCAP 0x40000000 /* Filter action */ enum { FILTER_PASS = 0, /* default */ FILTER_DROP, FILTER_SWITCH }; /* 802.1q manipulation on FILTER_SWITCH */ enum { VLAN_NOCHANGE = 0, /* default */ VLAN_REMOVE, VLAN_INSERT, VLAN_REWRITE }; /* MPS match type */ enum { UCAST_EXACT = 0, /* exact unicast match */ UCAST_HASH = 1, /* inexact (hashed) unicast match */ MCAST_EXACT = 2, /* exact multicast match */ MCAST_HASH = 3, /* inexact (hashed) multicast match */ PROMISC = 4, /* no match but port is promiscuous */ HYPPROMISC = 5, /* port is hypervisor-promisuous + not bcast */ BCAST = 6, /* broadcast packet */ }; /* Rx steering */ enum { DST_MODE_QUEUE, /* queue is directly specified by filter */ DST_MODE_RSS_QUEUE, /* filter specifies RSS entry containing queue */ DST_MODE_RSS, /* queue selected by default RSS hash lookup */ DST_MODE_FILT_RSS /* queue selected by hashing in filter-specified RSS subtable */ }; enum { NAT_MODE_NONE = 0, /* No NAT performed */ NAT_MODE_DIP, /* NAT on Dst IP */ NAT_MODE_DIP_DP, /* NAT on Dst IP, Dst Port */ NAT_MODE_DIP_DP_SIP, /* NAT on Dst IP, Dst Port and Src IP */ NAT_MODE_DIP_DP_SP, /* NAT on Dst IP, Dst Port and Src Port */ NAT_MODE_SIP_SP, /* NAT on Src IP and Src Port */ NAT_MODE_DIP_SIP_SP, /* NAT on Dst IP, Src IP and Src Port */ NAT_MODE_ALL /* NAT on entire 4-tuple */ }; struct t4_filter_tuple { /* * These are always available. */ uint8_t sip[16]; /* source IP address (IPv4 in [3:0]) */ uint8_t dip[16]; /* destination IP address (IPv4 in [3:0]) */ uint16_t sport; /* source port */ uint16_t dport; /* destination port */ /* * A combination of these (up to 36 bits) is available. TP_VLAN_PRI_MAP * is used to select the global mode and all filters are limited to the * set of fields allowed by the global mode. 
*/ uint16_t vnic; /* VNIC id (PF/VF) or outer VLAN tag */ uint16_t vlan; /* VLAN tag */ uint16_t ethtype; /* Ethernet type */ uint8_t tos; /* TOS/Traffic Type */ uint8_t proto; /* protocol type */ uint32_t fcoe:1; /* FCoE packet */ uint32_t iport:3; /* ingress port */ uint32_t matchtype:3; /* MPS match type */ uint32_t frag:1; /* fragmentation extension header */ uint32_t macidx:9; /* exact match MAC index */ uint32_t vlan_vld:1; /* VLAN valid */ uint32_t ovlan_vld:1; /* outer VLAN tag valid, value in "vnic" */ uint32_t pfvf_vld:1; /* VNIC id (PF/VF) valid, value in "vnic" */ }; struct t4_filter_specification { uint32_t hitcnts:1; /* count filter hits in TCB */ uint32_t prio:1; /* filter has priority over active/server */ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */ uint32_t hash:1; /* 0 => LE TCAM, 1 => Hash */ uint32_t action:2; /* drop, pass, switch */ uint32_t rpttid:1; /* report TID in RSS hash field */ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */ uint32_t iq:10; /* ingress queue */ uint32_t maskhash:1; /* dirsteer=0: steer to an RSS sub-region */ uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */ /* 1 => TCB contains IQ ID */ /* * Switch proxy/rewrite fields. An ingress packet which matches a * filter with "switch" set will be looped back out as an egress * packet -- potentially with some Ethernet header rewriting. 
*/ uint32_t eport:2; /* egress port to switch packet out */ uint32_t newdmac:1; /* rewrite destination MAC address */ uint32_t newsmac:1; /* rewrite source MAC address */ uint32_t swapmac:1; /* swap SMAC/DMAC for loopback packet */ uint32_t newvlan:2; /* rewrite VLAN Tag */ uint32_t nat_mode:3; /* NAT operation mode */ uint32_t nat_flag_chk:1;/* check TCP flags before NAT'ing */ uint32_t nat_seq_chk; /* sequence value to use for NAT check*/ uint8_t dmac[ETHER_ADDR_LEN]; /* new destination MAC address */ uint8_t smac[ETHER_ADDR_LEN]; /* new source MAC address */ uint16_t vlan; /* VLAN Tag to insert */ uint8_t nat_dip[16]; /* destination IP to use after NAT'ing */ uint8_t nat_sip[16]; /* source IP to use after NAT'ing */ uint16_t nat_dport; /* destination port to use after NAT'ing */ uint16_t nat_sport; /* source port to use after NAT'ing */ /* * Filter rule value/mask pairs. */ struct t4_filter_tuple val; struct t4_filter_tuple mask; }; struct t4_filter { uint32_t idx; uint16_t l2tidx; uint16_t smtidx; uint64_t hits; struct t4_filter_specification fs; }; /* Tx Scheduling Class parameters */ struct t4_sched_class_params { int8_t level; /* scheduler hierarchy level */ int8_t mode; /* per-class or per-flow */ int8_t rateunit; /* bit or packet rate */ int8_t ratemode; /* %port relative or kbps absolute */ int8_t channel; /* scheduler channel [0..N] */ int8_t cl; /* scheduler class [0..N] */ int32_t minrate; /* minimum rate */ int32_t maxrate; /* maximum rate */ int16_t weight; /* percent weight */ int16_t pktsize; /* average packet size */ }; /* * Support for "sched-class" command to allow a TX Scheduling Class to be * programmed with various parameters. 
*/ struct t4_sched_params { int8_t subcmd; /* sub-command */ int8_t type; /* packet or flow */ union { struct { /* sub-command SCHED_CLASS_CONFIG */ int8_t minmax; /* minmax enable */ } config; struct t4_sched_class_params params; uint8_t reserved[6 + 8 * 8]; } u; }; enum { SCHED_CLASS_SUBCMD_CONFIG, /* config sub-command */ SCHED_CLASS_SUBCMD_PARAMS, /* params sub-command */ }; enum { SCHED_CLASS_TYPE_PACKET, }; enum { SCHED_CLASS_LEVEL_CL_RL, /* class rate limiter */ SCHED_CLASS_LEVEL_CL_WRR, /* class weighted round robin */ SCHED_CLASS_LEVEL_CH_RL, /* channel rate limiter */ }; enum { SCHED_CLASS_MODE_CLASS, /* per-class scheduling */ SCHED_CLASS_MODE_FLOW, /* per-flow scheduling */ }; enum { SCHED_CLASS_RATEUNIT_BITS, /* bit rate scheduling */ SCHED_CLASS_RATEUNIT_PKTS, /* packet rate scheduling */ }; enum { SCHED_CLASS_RATEMODE_REL, /* percent of port bandwidth */ SCHED_CLASS_RATEMODE_ABS, /* Kb/s */ }; /* * Support for "sched_queue" command to allow one or more NIC TX Queues to be * bound to a TX Scheduling Class. 
*/ struct t4_sched_queue { uint8_t port; int8_t queue; /* queue index; -1 => all queues */ int8_t cl; /* class index; -1 => unbind */ }; #define T4_SGE_CONTEXT_SIZE 24 enum { SGE_CONTEXT_EGRESS, SGE_CONTEXT_INGRESS, SGE_CONTEXT_FLM, SGE_CONTEXT_CNM }; struct t4_sge_context { uint32_t mem_id; uint32_t cid; uint32_t data[T4_SGE_CONTEXT_SIZE / 4]; }; struct t4_mem_range { uint32_t addr; uint32_t len; uint32_t *data; }; #define T4_TRACE_LEN 112 struct t4_trace_params { uint32_t data[T4_TRACE_LEN / 4]; uint32_t mask[T4_TRACE_LEN / 4]; uint16_t snap_len; uint16_t min_len; uint8_t skip_ofst; uint8_t skip_len; uint8_t invert; uint8_t port; }; struct t4_tracer { uint8_t idx; uint8_t enabled; uint8_t valid; struct t4_trace_params tp; }; struct t4_cudbg_dump { uint8_t wr_flash; uint8_t bitmap[16]; uint32_t len; uint8_t *data; }; enum { OPEN_TYPE_LISTEN = 'L', OPEN_TYPE_ACTIVE = 'A', OPEN_TYPE_PASSIVE = 'P', OPEN_TYPE_DONTCARE = 'D', }; struct offload_settings { int8_t offload; int8_t rx_coalesce; int8_t cong_algo; int8_t sched_class; int8_t tstamp; int8_t sack; int8_t nagle; int8_t ecn; int8_t ddp; int8_t tls; int16_t txq; int16_t rxq; int16_t mss; }; struct offload_rule { char open_type; struct offload_settings settings; struct bpf_program bpf_prog; /* compiled program/filter */ }; /* * An offload policy consists of a set of rules matched in sequence. The * settings of the first rule that matches are applied to that connection. 
*/ struct t4_offload_policy { uint32_t nrules; struct offload_rule *rule; }; #define CHELSIO_T4_GETREG _IOWR('f', T4_GETREG, struct t4_reg) #define CHELSIO_T4_SETREG _IOW('f', T4_SETREG, struct t4_reg) #define CHELSIO_T4_REGDUMP _IOWR('f', T4_REGDUMP, struct t4_regdump) #define CHELSIO_T4_GET_FILTER_MODE _IOWR('f', T4_GET_FILTER_MODE, uint32_t) #define CHELSIO_T4_SET_FILTER_MODE _IOW('f', T4_SET_FILTER_MODE, uint32_t) #define CHELSIO_T4_GET_FILTER _IOWR('f', T4_GET_FILTER, struct t4_filter) #define CHELSIO_T4_SET_FILTER _IOWR('f', T4_SET_FILTER, struct t4_filter) #define CHELSIO_T4_DEL_FILTER _IOW('f', T4_DEL_FILTER, struct t4_filter) #define CHELSIO_T4_GET_SGE_CONTEXT _IOWR('f', T4_GET_SGE_CONTEXT, \ struct t4_sge_context) #define CHELSIO_T4_LOAD_FW _IOW('f', T4_LOAD_FW, struct t4_data) #define CHELSIO_T4_GET_MEM _IOW('f', T4_GET_MEM, struct t4_mem_range) #define CHELSIO_T4_GET_I2C _IOWR('f', T4_GET_I2C, struct t4_i2c_data) #define CHELSIO_T4_CLEAR_STATS _IOW('f', T4_CLEAR_STATS, uint32_t) #define CHELSIO_T4_SCHED_CLASS _IOW('f', T4_SET_SCHED_CLASS, \ struct t4_sched_params) #define CHELSIO_T4_SCHED_QUEUE _IOW('f', T4_SET_SCHED_QUEUE, \ struct t4_sched_queue) #define CHELSIO_T4_GET_TRACER _IOWR('f', T4_GET_TRACER, struct t4_tracer) #define CHELSIO_T4_SET_TRACER _IOW('f', T4_SET_TRACER, struct t4_tracer) #define CHELSIO_T4_LOAD_CFG _IOW('f', T4_LOAD_CFG, struct t4_data) #define CHELSIO_T4_LOAD_BOOT _IOW('f', T4_LOAD_BOOT, struct t4_bootrom) #define CHELSIO_T4_LOAD_BOOTCFG _IOW('f', T4_LOAD_BOOTCFG, struct t4_data) #define CHELSIO_T4_CUDBG_DUMP _IOWR('f', T4_CUDBG_DUMP, struct t4_cudbg_dump) #define CHELSIO_T4_SET_OFLD_POLICY _IOW('f', T4_SET_OFLD_POLICY, struct t4_offload_policy) #endif diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c index 0f0b4d7ee5b3..df837cc50454 100644 --- a/sys/dev/cxgbe/tom/t4_tom.c +++ b/sys/dev/cxgbe/tom/t4_tom.c @@ -1,1942 +1,1942 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 Chelsio 
Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_kern_tls.h" #include "opt_ratelimit.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TCPSTATES #include #include #include #include #include #ifdef TCP_OFFLOAD #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "common/t4_tcb.h" #include "t4_clip.h" #include "tom/t4_tom_l2t.h" #include "tom/t4_tom.h" #include "tom/t4_tls.h" static struct protosw toe_protosw; static struct pr_usrreqs toe_usrreqs; static struct protosw toe6_protosw; static struct pr_usrreqs toe6_usrreqs; /* Module ops */ static int t4_tom_mod_load(void); static int t4_tom_mod_unload(void); static int t4_tom_modevent(module_t, int, void *); /* ULD ops and helpers */ static int t4_tom_activate(struct adapter *); static int t4_tom_deactivate(struct adapter *); static struct uld_info tom_uld_info = { .uld_id = ULD_TOM, .activate = t4_tom_activate, .deactivate = t4_tom_deactivate, }; static void release_offload_resources(struct toepcb *); static int alloc_tid_tabs(struct tid_info *); static void free_tid_tabs(struct tid_info *); static void free_tom_data(struct adapter *, struct tom_data *); static void reclaim_wr_resources(void *, int); struct toepcb * alloc_toepcb(struct vi_info *vi, int flags) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct toepcb *toep; int tx_credits, txsd_total, len; /* * The firmware counts tx work request credits in units of 16 bytes * each. Reserve room for an ABORT_REQ so the driver never has to worry * about tx credits if it wants to abort a connection. 
*/ tx_credits = sc->params.ofldq_wr_cred; tx_credits -= howmany(sizeof(struct cpl_abort_req), 16); /* * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte * immediate payload, and firmware counts tx work request credits in * units of 16 byte. Calculate the maximum work requests possible. */ txsd_total = tx_credits / howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16); len = offsetof(struct toepcb, txsd) + txsd_total * sizeof(struct ofld_tx_sdesc); toep = malloc(len, M_CXGBE, M_ZERO | flags); if (toep == NULL) return (NULL); refcount_init(&toep->refcount, 1); toep->td = sc->tom_softc; toep->vi = vi; toep->tid = -1; toep->tx_total = tx_credits; toep->tx_credits = tx_credits; mbufq_init(&toep->ulp_pduq, INT_MAX); mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX); toep->txsd_total = txsd_total; toep->txsd_avail = txsd_total; toep->txsd_pidx = 0; toep->txsd_cidx = 0; aiotx_init_toep(toep); return (toep); } /* * Initialize a toepcb after its params have been filled out. */ int init_toepcb(struct vi_info *vi, struct toepcb *toep) { struct conn_params *cp = &toep->params; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct tx_cl_rl_params *tc; if (cp->tc_idx >= 0 && cp->tc_idx < sc->chip_params->nsched_cls) { tc = &pi->sched_params->cl_rl[cp->tc_idx]; mtx_lock(&sc->tc_lock); if (tc->flags & CLRL_ERR) { log(LOG_ERR, "%s: failed to associate traffic class %u with tid %u\n", device_get_nameunit(vi->dev), cp->tc_idx, toep->tid); cp->tc_idx = -1; } else { tc->refcount++; } mtx_unlock(&sc->tc_lock); } toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx]; toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx]; toep->ctrlq = &sc->sge.ctrlq[pi->port_id]; tls_init_toep(toep); if (ulp_mode(toep) == ULP_MODE_TCPDDP) ddp_init_toep(toep); toep->flags |= TPF_INITIALIZED; return (0); } struct toepcb * hold_toepcb(struct toepcb *toep) { refcount_acquire(&toep->refcount); return (toep); } void free_toepcb(struct toepcb *toep) { if (refcount_release(&toep->refcount) == 0) 
return; KASSERT(!(toep->flags & TPF_ATTACHED), ("%s: attached to an inpcb", __func__)); KASSERT(!(toep->flags & TPF_CPL_PENDING), ("%s: CPL pending", __func__)); if (toep->flags & TPF_INITIALIZED) { if (ulp_mode(toep) == ULP_MODE_TCPDDP) ddp_uninit_toep(toep); tls_uninit_toep(toep); } free(toep, M_CXGBE); } /* * Set up the socket for TCP offload. */ void offload_socket(struct socket *so, struct toepcb *toep) { struct tom_data *td = toep->td; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); struct sockbuf *sb; INP_WLOCK_ASSERT(inp); /* Update socket */ sb = &so->so_snd; SOCKBUF_LOCK(sb); sb->sb_flags |= SB_NOCOALESCE; SOCKBUF_UNLOCK(sb); sb = &so->so_rcv; SOCKBUF_LOCK(sb); sb->sb_flags |= SB_NOCOALESCE; if (inp->inp_vflag & INP_IPV6) so->so_proto = &toe6_protosw; else so->so_proto = &toe_protosw; SOCKBUF_UNLOCK(sb); /* Update TCP PCB */ tp->tod = &td->tod; tp->t_toe = toep; tp->t_flags |= TF_TOE; /* Install an extra hold on inp */ toep->inp = inp; toep->flags |= TPF_ATTACHED; in_pcbref(inp); /* Add the TOE PCB to the active list */ mtx_lock(&td->toep_list_lock); TAILQ_INSERT_HEAD(&td->toep_list, toep, link); mtx_unlock(&td->toep_list_lock); } /* This is _not_ the normal way to "unoffload" a socket. 
*/ void undo_offload_socket(struct socket *so) { struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); struct toepcb *toep = tp->t_toe; struct tom_data *td = toep->td; struct sockbuf *sb; INP_WLOCK_ASSERT(inp); sb = &so->so_snd; SOCKBUF_LOCK(sb); sb->sb_flags &= ~SB_NOCOALESCE; SOCKBUF_UNLOCK(sb); sb = &so->so_rcv; SOCKBUF_LOCK(sb); sb->sb_flags &= ~SB_NOCOALESCE; SOCKBUF_UNLOCK(sb); tp->tod = NULL; tp->t_toe = NULL; tp->t_flags &= ~TF_TOE; toep->inp = NULL; toep->flags &= ~TPF_ATTACHED; if (in_pcbrele_wlocked(inp)) panic("%s: inp freed.", __func__); mtx_lock(&td->toep_list_lock); TAILQ_REMOVE(&td->toep_list, toep, link); mtx_unlock(&td->toep_list_lock); } static void release_offload_resources(struct toepcb *toep) { struct tom_data *td = toep->td; struct adapter *sc = td_adapter(td); int tid = toep->tid; KASSERT(!(toep->flags & TPF_CPL_PENDING), ("%s: %p has CPL pending.", __func__, toep)); KASSERT(!(toep->flags & TPF_ATTACHED), ("%s: %p is still attached.", __func__, toep)); CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)", __func__, toep, tid, toep->l2te, toep->ce); /* * These queues should have been emptied at approximately the same time * that a normal connection's socket's so_snd would have been purged or * drained. Do _not_ clean up here. */ MPASS(mbufq_len(&toep->ulp_pduq) == 0); MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0); #ifdef INVARIANTS if (ulp_mode(toep) == ULP_MODE_TCPDDP) ddp_assert_empty(toep); #endif MPASS(TAILQ_EMPTY(&toep->aiotx_jobq)); if (toep->l2te) t4_l2t_release(toep->l2te); if (tid >= 0) { remove_tid(sc, tid, toep->ce ? 
2 : 1); release_tid(sc, tid, toep->ctrlq); } if (toep->ce) t4_release_lip(sc, toep->ce); if (toep->params.tc_idx != -1) t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->params.tc_idx); mtx_lock(&td->toep_list_lock); TAILQ_REMOVE(&td->toep_list, toep, link); mtx_unlock(&td->toep_list_lock); free_toepcb(toep); } /* * The kernel is done with the TCP PCB and this is our opportunity to unhook the * toepcb hanging off of it. If the TOE driver is also done with the toepcb (no * pending CPL) then it is time to release all resources tied to the toepcb. * * Also gets called when an offloaded active open fails and the TOM wants the * kernel to take the TCP PCB back. */ static void t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp) { #if defined(KTR) || defined(INVARIANTS) struct inpcb *inp = tp->t_inpcb; #endif struct toepcb *toep = tp->t_toe; INP_WLOCK_ASSERT(inp); KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); KASSERT(toep->flags & TPF_ATTACHED, ("%s: not attached", __func__)); #ifdef KTR if (tp->t_state == TCPS_SYN_SENT) { CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)", __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags); } else { CTR6(KTR_CXGBE, "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)", toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp, inp->inp_flags); } #endif if (ulp_mode(toep) == ULP_MODE_TLS) tls_detach(toep); tp->tod = NULL; tp->t_toe = NULL; tp->t_flags &= ~TF_TOE; toep->flags &= ~TPF_ATTACHED; if (!(toep->flags & TPF_CPL_PENDING)) release_offload_resources(toep); } /* * setsockopt handler. */ static void t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name) { struct adapter *sc = tod->tod_softc; struct toepcb *toep = tp->t_toe; if (dir == SOPT_GET) return; CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name); switch (name) { case TCP_NODELAY: if (tp->t_state != TCPS_ESTABLISHED) break; toep->params.nagle = tp->t_flags & TF_NODELAY ? 
0 : 1;
		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(toep->params.nagle), 0, 0);
		break;
	default:
		break;
	}
}

/*
 * Returns the 64-bit t_flags word of a raw TCB image.  NOTE(review): the
 * flags are assembled from flits 14 and 15 of the big-endian TCB -- confirm
 * the flit positions against the TCB layout in t4_tcb.h.
 */
static inline uint64_t
get_tcb_tflags(const uint64_t *tcb)
{

	return ((be64toh(tcb[14]) << 32) | (be64toh(tcb[15]) >> 32));
}

/*
 * Extracts one field from a raw TCB image.  'word' is a 32-bit TCB word
 * index (a W_TCB_* constant), 'mask' the field's mask after shifting
 * (M_TCB_*), and 'shift' the field's bit offset within its word (S_TCB_*).
 * The image is stored as big-endian 64-bit flits in reverse word order,
 * hence flit_idx = (LAST_WORD - word) / 2 below.  A field whose mask does
 * not fit above 'shift' spills into the adjacent flit and is stitched
 * together from both.
 */
static inline uint32_t
get_tcb_field(const uint64_t *tcb, u_int word, uint32_t mask, u_int shift)
{
#define LAST_WORD ((TCB_SIZE / 4) - 1)
	uint64_t t1, t2;
	int flit_idx;

	MPASS(mask != 0);
	MPASS(word <= LAST_WORD);
	MPASS(shift < 32);

	flit_idx = (LAST_WORD - word) / 2;
	/* Odd-numbered words occupy the upper half of their flit. */
	if (word & 0x1)
		shift += 32;
	t1 = be64toh(tcb[flit_idx]) >> shift;
	t2 = 0;
	if (fls(mask) > 64 - shift) {
		/*
		 * Will spill over into the next logical flit, which is the flit
		 * before this one. The flit_idx before this one must be valid.
		 */
		MPASS(flit_idx > 0);
		t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
	}
	return ((t2 | t1) & mask);
#undef LAST_WORD
}

/* Convenience wrapper that pastes the W_/M_/S_ constants for TCB field F. */
#define GET_TCB_FIELD(tcb, F) \
    get_tcb_field(tcb, W_TCB_##F, M_TCB_##F, S_TCB_##F)

/*
 * Issues a CPL_GET_TCB to read the entire TCB for the tid.
*/ static int send_get_tcb(struct adapter *sc, u_int tid) { struct cpl_get_tcb *cpl; struct wrq_cookie cookie; MPASS(tid < sc->tids.ntids); cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16), &cookie); if (__predict_false(cpl == NULL)) return (ENOMEM); bzero(cpl, sizeof(*cpl)); INIT_TP_WR(cpl, tid); OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid)); cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) | V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id)); cpl->cookie = 0xff; commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie); return (0); } static struct tcb_histent * alloc_tcb_histent(struct adapter *sc, u_int tid, int flags) { struct tcb_histent *te; MPASS(flags == M_NOWAIT || flags == M_WAITOK); te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags); if (te == NULL) return (NULL); mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF); callout_init_mtx(&te->te_callout, &te->te_lock, 0); te->te_adapter = sc; te->te_tid = tid; return (te); } static void free_tcb_histent(struct tcb_histent *te) { mtx_destroy(&te->te_lock); free(te, M_CXGBE); } /* * Start tracking the tid in the TCB history. 
*/ int add_tid_to_history(struct adapter *sc, u_int tid) { struct tcb_histent *te = NULL; struct tom_data *td = sc->tom_softc; int rc; MPASS(tid < sc->tids.ntids); if (td->tcb_history == NULL) return (ENXIO); rw_wlock(&td->tcb_history_lock); if (td->tcb_history[tid] != NULL) { rc = EEXIST; goto done; } te = alloc_tcb_histent(sc, tid, M_NOWAIT); if (te == NULL) { rc = ENOMEM; goto done; } mtx_lock(&te->te_lock); rc = send_get_tcb(sc, tid); if (rc == 0) { te->te_flags |= TE_RPL_PENDING; td->tcb_history[tid] = te; } else { free(te, M_CXGBE); } mtx_unlock(&te->te_lock); done: rw_wunlock(&td->tcb_history_lock); return (rc); } static void remove_tcb_histent(struct tcb_histent *te) { struct adapter *sc = te->te_adapter; struct tom_data *td = sc->tom_softc; rw_assert(&td->tcb_history_lock, RA_WLOCKED); mtx_assert(&te->te_lock, MA_OWNED); MPASS(td->tcb_history[te->te_tid] == te); td->tcb_history[te->te_tid] = NULL; free_tcb_histent(te); rw_wunlock(&td->tcb_history_lock); } static inline struct tcb_histent * lookup_tcb_histent(struct adapter *sc, u_int tid, bool addrem) { struct tcb_histent *te; struct tom_data *td = sc->tom_softc; MPASS(tid < sc->tids.ntids); if (td->tcb_history == NULL) return (NULL); if (addrem) rw_wlock(&td->tcb_history_lock); else rw_rlock(&td->tcb_history_lock); te = td->tcb_history[tid]; if (te != NULL) { mtx_lock(&te->te_lock); return (te); /* with both locks held */ } if (addrem) rw_wunlock(&td->tcb_history_lock); else rw_runlock(&td->tcb_history_lock); return (te); } static inline void release_tcb_histent(struct tcb_histent *te) { struct adapter *sc = te->te_adapter; struct tom_data *td = sc->tom_softc; mtx_assert(&te->te_lock, MA_OWNED); mtx_unlock(&te->te_lock); rw_assert(&td->tcb_history_lock, RA_RLOCKED); rw_runlock(&td->tcb_history_lock); } static void request_tcb(void *arg) { struct tcb_histent *te = arg; mtx_assert(&te->te_lock, MA_OWNED); /* Noone else is supposed to update the histent. 
*/ MPASS(!(te->te_flags & TE_RPL_PENDING)); if (send_get_tcb(te->te_adapter, te->te_tid) == 0) te->te_flags |= TE_RPL_PENDING; else callout_schedule(&te->te_callout, hz / 100); } static void update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb) { struct tom_data *td = te->te_adapter->tom_softc; uint64_t tflags = get_tcb_tflags(tcb); uint8_t sample = 0; if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != GET_TCB_FIELD(tcb, SND_UNA_RAW)) { if (GET_TCB_FIELD(tcb, T_RXTSHIFT) != 0) sample |= TS_RTO; if (GET_TCB_FIELD(tcb, T_DUPACKS) != 0) sample |= TS_DUPACKS; if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold) sample |= TS_FASTREXMT; } if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != 0) { uint32_t snd_wnd; sample |= TS_SND_BACKLOGGED; /* for whatever reason. */ snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV); if (tflags & V_TF_RECV_SCALE(1)) snd_wnd <<= GET_TCB_FIELD(tcb, RCV_SCALE); if (GET_TCB_FIELD(tcb, SND_CWND) < snd_wnd) sample |= TS_CWND_LIMITED; /* maybe due to CWND */ } if (tflags & V_TF_CCTRL_ECN(1)) { /* * CE marker on incoming IP hdr, echoing ECE back in the TCP * hdr. Indicates congestion somewhere on the way from the peer * to this node. */ if (tflags & V_TF_CCTRL_ECE(1)) sample |= TS_ECN_ECE; /* * ECE seen and CWR sent (or about to be sent). Might indicate * congestion on the way to the peer. This node is reducing its * congestion window in response. 
*/ if (tflags & (V_TF_CCTRL_CWR(1) | V_TF_CCTRL_RFR(1))) sample |= TS_ECN_CWR; } te->te_sample[te->te_pidx] = sample; if (++te->te_pidx == nitems(te->te_sample)) te->te_pidx = 0; memcpy(te->te_tcb, tcb, TCB_SIZE); te->te_flags |= TE_ACTIVE; } static int do_get_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_get_tcb_rpl *cpl = mtod(m, const void *); const uint64_t *tcb = (const uint64_t *)(const void *)(cpl + 1); struct tcb_histent *te; const u_int tid = GET_TID(cpl); bool remove; remove = GET_TCB_FIELD(tcb, T_STATE) == TCPS_CLOSED; te = lookup_tcb_histent(sc, tid, remove); if (te == NULL) { /* Not in the history. Who issued the GET_TCB for this? */ device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, " "srtt %u, sscale %u, rscale %u, cookie 0x%x\n", tid, (uintmax_t)get_tcb_tflags(tcb), GET_TCB_FIELD(tcb, T_STATE), GET_TCB_FIELD(tcb, T_SRTT), GET_TCB_FIELD(tcb, SND_SCALE), GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie); goto done; } MPASS(te->te_flags & TE_RPL_PENDING); te->te_flags &= ~TE_RPL_PENDING; if (remove) { remove_tcb_histent(te); } else { update_tcb_histent(te, tcb); callout_reset(&te->te_callout, hz / 10, request_tcb, te); release_tcb_histent(te); } done: m_freem(m); return (0); } static void fill_tcp_info_from_tcb(struct adapter *sc, uint64_t *tcb, struct tcp_info *ti) { uint32_t v; ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE); v = GET_TCB_FIELD(tcb, T_SRTT); ti->tcpi_rtt = tcp_ticks_to_us(sc, v); v = GET_TCB_FIELD(tcb, T_RTTVAR); ti->tcpi_rttvar = tcp_ticks_to_us(sc, v); ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH); ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND); ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT); v = GET_TCB_FIELD(tcb, TX_MAX); ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW); /* Receive window being advertised by us. */ ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE); /* Yes, SND. 
*/ ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND); /* Send window */ ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE); /* Yes, RCV. */ ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV); if (get_tcb_tflags(tcb) & V_TF_RECV_SCALE(1)) ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale; else ti->tcpi_snd_wscale = 0; } static void fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te, struct tcp_info *ti) { fill_tcp_info_from_tcb(sc, te->te_tcb, ti); } /* * Reads the TCB for the given tid using a memory window and copies it to 'buf' * in the same format as CPL_GET_TCB_RPL. */ static void read_tcb_using_memwin(struct adapter *sc, u_int tid, uint64_t *buf) { int i, j, k, rc; uint32_t addr; u_char *tcb, tmp; MPASS(tid < sc->tids.ntids); addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE; rc = read_via_memwin(sc, 2, addr, (uint32_t *)buf, TCB_SIZE); if (rc != 0) return; tcb = (u_char *)buf; for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) { for (k = 0; k < 16; k++) { tmp = tcb[i + k]; tcb[i + k] = tcb[j + k]; tcb[j + k] = tmp; } } } static void fill_tcp_info(struct adapter *sc, u_int tid, struct tcp_info *ti) { uint64_t tcb[TCB_SIZE / sizeof(uint64_t)]; struct tcb_histent *te; ti->tcpi_toe_tid = tid; te = lookup_tcb_histent(sc, tid, false); if (te != NULL) { fill_tcp_info_from_history(sc, te, ti); release_tcb_histent(te); } else { if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) { /* XXX: tell firmware to flush TCB cache. */ } read_tcb_using_memwin(sc, tid, tcb); fill_tcp_info_from_tcb(sc, tcb, ti); } } /* * Called by the kernel to allow the TOE driver to "refine" values filled up in * the tcp_info for an offloaded connection. 
*/ static void t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti) { struct adapter *sc = tod->tod_softc; struct toepcb *toep = tp->t_toe; INP_WLOCK_ASSERT(tp->t_inpcb); MPASS(ti != NULL); fill_tcp_info(sc, toep->tid, ti); } #ifdef KERN_TLS static int t4_alloc_tls_session(struct toedev *tod, struct tcpcb *tp, struct ktls_session *tls, int direction) { struct toepcb *toep = tp->t_toe; INP_WLOCK_ASSERT(tp->t_inpcb); MPASS(tls != NULL); return (tls_alloc_ktls(toep, tls, direction)); } #endif /* * The TOE driver will not receive any more CPLs for the tid associated with the * toepcb; release the hold on the inpcb. */ void final_cpl_received(struct toepcb *toep) { struct inpcb *inp = toep->inp; KASSERT(inp != NULL, ("%s: inp is NULL", __func__)); INP_WLOCK_ASSERT(inp); KASSERT(toep->flags & TPF_CPL_PENDING, ("%s: CPL not pending already?", __func__)); CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)", __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags); if (ulp_mode(toep) == ULP_MODE_TCPDDP) release_ddp_resources(toep); else if (ulp_mode(toep) == ULP_MODE_TLS) tls_detach(toep); toep->inp = NULL; toep->flags &= ~TPF_CPL_PENDING; mbufq_drain(&toep->ulp_pdu_reclaimq); if (!(toep->flags & TPF_ATTACHED)) release_offload_resources(toep); if (!in_pcbrele_wlocked(inp)) INP_WUNLOCK(inp); } void insert_tid(struct adapter *sc, int tid, void *ctx, int ntids) { struct tid_info *t = &sc->tids; MPASS(tid >= t->tid_base); MPASS(tid - t->tid_base < t->ntids); t->tid_tab[tid - t->tid_base] = ctx; atomic_add_int(&t->tids_in_use, ntids); } void * lookup_tid(struct adapter *sc, int tid) { struct tid_info *t = &sc->tids; return (t->tid_tab[tid - t->tid_base]); } void update_tid(struct adapter *sc, int tid, void *ctx) { struct tid_info *t = &sc->tids; t->tid_tab[tid - t->tid_base] = ctx; } void remove_tid(struct adapter *sc, int tid, int ntids) { struct tid_info *t = &sc->tids; t->tid_tab[tid - t->tid_base] = NULL; atomic_subtract_int(&t->tids_in_use, 
ntids);
}

/*
 * What mtu_idx to use, given a 4-tuple.  Note that both s->mss and tcp_mssopt
 * have the MSS that we should advertise in our SYN.  Advertised MSS doesn't
 * account for any TCP options so the effective MSS (only payload, no headers or
 * options) could be different.
 */
static int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
    struct offload_settings *s)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, mtu;

	MPASS(inc != NULL);

	/* A positive MSS in the offload policy overrides the stack's value. */
	mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
	if (inc->inc_flags & INC_ISIPV6)
		mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);

	/* Index of the largest MTU table entry that does not exceed mtu. */
	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
		continue;

	return (i);
}

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return min(wnd, MAX_RCV_WND);
}

/*
 * Largest receive window scale usable given the system-wide socket buffer
 * limit (sb_max), capped at TCP_MAX_WINSHIFT.
 */
int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}

/*
 * Build the opt0 word of a connection work request from the parameters in
 * 'cp' and the VI.  (Converted to wire byte order by the caller-visible
 * htobe64 at the end of this function.)
 */
__be64
calc_options0(struct vi_info *vi, struct conn_params *cp)
{
	uint64_t opt0 = 0;

	opt0 |= F_TCAM_BYPASS;

	MPASS(cp->wscale >= 0 && cp->wscale <= M_WND_SCALE);
	opt0 |= V_WND_SCALE(cp->wscale);

	MPASS(cp->mtu_idx >= 0 && cp->mtu_idx < NMTUS);
	opt0 |= V_MSS_IDX(cp->mtu_idx);

	MPASS(cp->ulp_mode >= 0 && cp->ulp_mode <= M_ULP_MODE);
	opt0 |= V_ULP_MODE(cp->ulp_mode);

	MPASS(cp->opt0_bufsize >= 0 && cp->opt0_bufsize <= M_RCV_BUFSIZ);
	opt0 |= V_RCV_BUFSIZ(cp->opt0_bufsize);

	MPASS(cp->l2t_idx >= 0 && cp->l2t_idx < vi->adapter->vres.l2t.size);
	opt0 |= V_L2T_IDX(cp->l2t_idx);

	opt0 |= V_SMAC_SEL(vi->smt_idx);
	opt0 |= V_TX_CHAN(vi->pi->tx_chan);

	MPASS(cp->keepalive == 0 || cp->keepalive == 1);
	opt0 |= V_KEEP_ALIVE(cp->keepalive);

	MPASS(cp->nagle == 0 || cp->nagle == 1);
	opt0 |=
V_NAGLE(cp->nagle); return (htobe64(opt0)); } __be32 calc_options2(struct vi_info *vi, struct conn_params *cp) { uint32_t opt2 = 0; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; /* * rx flow control, rx coalesce, congestion control, and tx pace are all * explicitly set by the driver. On T5+ the ISS is also set by the * driver to the value picked by the kernel. */ if (is_t4(sc)) { opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID; opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID; } else { opt2 |= F_T5_OPT_2_VALID; /* all 4 valid */ opt2 |= F_T5_ISS; /* ISS provided in CPL */ } MPASS(cp->sack == 0 || cp->sack == 1); opt2 |= V_SACK_EN(cp->sack); MPASS(cp->tstamp == 0 || cp->tstamp == 1); opt2 |= V_TSTAMPS_EN(cp->tstamp); if (cp->wscale > 0) opt2 |= F_WND_SCALE_EN; MPASS(cp->ecn == 0 || cp->ecn == 1); opt2 |= V_CCTRL_ECN(cp->ecn); /* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */ opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]); opt2 |= V_PACE(0); opt2 |= F_RSS_QUEUE_VALID; opt2 |= V_RSS_QUEUE(sc->sge.ofld_rxq[cp->rxq_idx].iq.abs_id); MPASS(cp->cong_algo >= 0 && cp->cong_algo <= M_CONG_CNTRL); opt2 |= V_CONG_CNTRL(cp->cong_algo); MPASS(cp->rx_coalesce == 0 || cp->rx_coalesce == 1); if (cp->rx_coalesce == 1) opt2 |= V_RX_COALESCE(M_RX_COALESCE); opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0); #ifdef USE_DDP_RX_FLOW_CONTROL if (cp->ulp_mode == ULP_MODE_TCPDDP) opt2 |= F_RX_FC_DDP; #endif return (htobe32(opt2)); } uint64_t select_ntuple(struct vi_info *vi, struct l2t_entry *e) { struct adapter *sc = vi->adapter; struct tp_params *tp = &sc->params.tp; uint64_t ntuple = 0; /* * Initialize each of the fields which we care about which are present * in the Compressed Filter Tuple. 
*/ if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE) ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift; if (tp->port_shift >= 0) ntuple |= (uint64_t)e->lport << tp->port_shift; if (tp->protocol_shift >= 0) ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift; - if (tp->vnic_shift >= 0 && tp->ingress_config & F_VNIC) { + if (tp->vnic_shift >= 0 && tp->vnic_mode == FW_VNIC_MODE_PF_VF) { ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) | V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) << tp->vnic_shift; } if (is_t4(sc)) return (htobe32((uint32_t)ntuple)); else return (htobe64(V_FILTER_TUPLE(ntuple))); } static int is_tls_sock(struct socket *so, struct adapter *sc) { struct inpcb *inp = sotoinpcb(so); int i, rc; /* XXX: Eventually add a SO_WANT_TLS socket option perhaps? */ rc = 0; ADAPTER_LOCK(sc); for (i = 0; i < sc->tt.num_tls_rx_ports; i++) { if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) || inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) { rc = 1; break; } } ADAPTER_UNLOCK(sc); return (rc); } /* * Initialize various connection parameters. 
*/
void
init_conn_params(struct vi_info *vi, struct offload_settings *s,
    struct in_conninfo *inc, struct socket *so, const struct tcp_options *tcpopt,
    int16_t l2t_idx, struct conn_params *cp)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tom_tunables *tt = &sc->tt;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	u_long wnd;

	MPASS(s->offload != 0);

	/*
	 * Congestion control algorithm.  Precedence: per-connection offload
	 * policy, then the global tunable, then a mapping of the algorithm
	 * the host stack picked for this connection.
	 */
	if (s->cong_algo >= 0)
		cp->cong_algo = s->cong_algo & M_CONG_CNTRL;
	else if (sc->tt.cong_algorithm >= 0)
		cp->cong_algo = tt->cong_algorithm & M_CONG_CNTRL;
	else {
		struct cc_algo *cc = CC_ALGO(tp);

		/*
		 * This must be one else-if chain.  The previous code dropped
		 * two "else"s, so a match on "reno" or "tahoe" fell through
		 * and was silently overwritten by the final else below.
		 */
		if (strcasecmp(cc->name, "reno") == 0)
			cp->cong_algo = CONG_ALG_RENO;
		else if (strcasecmp(cc->name, "tahoe") == 0)
			cp->cong_algo = CONG_ALG_TAHOE;
		else if (strcasecmp(cc->name, "newreno") == 0)
			cp->cong_algo = CONG_ALG_NEWRENO;
		else if (strcasecmp(cc->name, "highspeed") == 0)
			cp->cong_algo = CONG_ALG_HIGHSPEED;
		else {
			/*
			 * Use newreno in case the algorithm selected by the
			 * host stack is not supported by the hardware.
			 */
			cp->cong_algo = CONG_ALG_NEWRENO;
		}
	}

	/* Tx traffic scheduling class. */
	if (s->sched_class >= 0 && s->sched_class < sc->chip_params->nsched_cls)
		cp->tc_idx = s->sched_class;
	else
		cp->tc_idx = -1;

	/* Nagle's algorithm. */
	if (s->nagle >= 0)
		cp->nagle = s->nagle > 0 ? 1 : 0;
	else
		cp->nagle = tp->t_flags & TF_NODELAY ? 0 : 1;

	/* TCP Keepalive. */
	if (V_tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE)
		cp->keepalive = 1;
	else
		cp->keepalive = 0;

	/* Optimization that's specific to T5 @ 40G. */
	if (tt->tx_align >= 0)
		cp->tx_align = tt->tx_align > 0 ? 1 : 0;
	else if (chip_id(sc) == CHELSIO_T5 &&
	    (port_top_speed(pi) > 10 || sc->params.nports > 2))
		cp->tx_align = 1;
	else
		cp->tx_align = 0;

	/* ULP mode.
*/ if (can_tls_offload(sc) && (s->tls > 0 || (s->tls < 0 && is_tls_sock(so, sc)))) cp->ulp_mode = ULP_MODE_TLS; else if (s->ddp > 0 || (s->ddp < 0 && sc->tt.ddp && (so_options_get(so) & SO_NO_DDP) == 0)) cp->ulp_mode = ULP_MODE_TCPDDP; else cp->ulp_mode = ULP_MODE_NONE; /* Rx coalescing. */ if (s->rx_coalesce >= 0) cp->rx_coalesce = s->rx_coalesce > 0 ? 1 : 0; else if (cp->ulp_mode == ULP_MODE_TLS) cp->rx_coalesce = 0; else if (tt->rx_coalesce >= 0) cp->rx_coalesce = tt->rx_coalesce > 0 ? 1 : 0; else cp->rx_coalesce = 1; /* default */ /* * Index in the PMTU table. This controls the MSS that we announce in * our SYN initially, but after ESTABLISHED it controls the MSS that we * use to send data. */ cp->mtu_idx = find_best_mtu_idx(sc, inc, s); /* Tx queue for this connection. */ if (s->txq >= 0 && s->txq < vi->nofldtxq) cp->txq_idx = s->txq; else cp->txq_idx = arc4random() % vi->nofldtxq; cp->txq_idx += vi->first_ofld_txq; /* Rx queue for this connection. */ if (s->rxq >= 0 && s->rxq < vi->nofldrxq) cp->rxq_idx = s->rxq; else cp->rxq_idx = arc4random() % vi->nofldrxq; cp->rxq_idx += vi->first_ofld_rxq; if (SOLISTENING(so)) { /* Passive open */ MPASS(tcpopt != NULL); /* TCP timestamp option */ if (tcpopt->tstamp && (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323))) cp->tstamp = 1; else cp->tstamp = 0; /* SACK */ if (tcpopt->sack && (s->sack > 0 || (s->sack < 0 && V_tcp_do_sack))) cp->sack = 1; else cp->sack = 0; /* Receive window scaling. */ if (tcpopt->wsf > 0 && tcpopt->wsf < 15 && V_tcp_do_rfc1323) cp->wscale = select_rcv_wscale(); else cp->wscale = 0; /* ECN */ if (tcpopt->ecn && /* XXX: review. 
*/ (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn))) cp->ecn = 1; else cp->ecn = 0; wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND); cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ); if (tt->sndbuf > 0) cp->sndbuf = tt->sndbuf; else if (so->sol_sbsnd_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) cp->sndbuf = 256 * 1024; else cp->sndbuf = so->sol_sbsnd_hiwat; } else { /* Active open */ /* TCP timestamp option */ if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP))) cp->tstamp = 1; else cp->tstamp = 0; /* SACK */ if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT))) cp->sack = 1; else cp->sack = 0; /* Receive window scaling */ if (tp->t_flags & TF_REQ_SCALE) cp->wscale = select_rcv_wscale(); else cp->wscale = 0; /* ECN */ if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1)) cp->ecn = 1; else cp->ecn = 0; SOCKBUF_LOCK(&so->so_rcv); wnd = max(select_rcv_wnd(so), MIN_RCV_WND); SOCKBUF_UNLOCK(&so->so_rcv); cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ); if (tt->sndbuf > 0) cp->sndbuf = tt->sndbuf; else { SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) cp->sndbuf = 256 * 1024; else cp->sndbuf = so->so_snd.sb_hiwat; SOCKBUF_UNLOCK(&so->so_snd); } } cp->l2t_idx = l2t_idx; /* This will be initialized on ESTABLISHED. 
*/ cp->emss = 0; } int negative_advice(int status) { return (status == CPL_ERR_RTX_NEG_ADVICE || status == CPL_ERR_PERSIST_NEG_ADVICE || status == CPL_ERR_KEEPALV_NEG_ADVICE); } static int alloc_tid_tab(struct tid_info *t, int flags) { MPASS(t->ntids > 0); MPASS(t->tid_tab == NULL); t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE, M_ZERO | flags); if (t->tid_tab == NULL) return (ENOMEM); atomic_store_rel_int(&t->tids_in_use, 0); return (0); } static void free_tid_tab(struct tid_info *t) { KASSERT(t->tids_in_use == 0, ("%s: %d tids still in use.", __func__, t->tids_in_use)); free(t->tid_tab, M_CXGBE); t->tid_tab = NULL; } static int alloc_stid_tab(struct tid_info *t, int flags) { MPASS(t->nstids > 0); MPASS(t->stid_tab == NULL); t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE, M_ZERO | flags); if (t->stid_tab == NULL) return (ENOMEM); mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF); t->stids_in_use = 0; TAILQ_INIT(&t->stids); t->nstids_free_head = t->nstids; return (0); } static void free_stid_tab(struct tid_info *t) { KASSERT(t->stids_in_use == 0, ("%s: %d tids still in use.", __func__, t->stids_in_use)); if (mtx_initialized(&t->stid_lock)) mtx_destroy(&t->stid_lock); free(t->stid_tab, M_CXGBE); t->stid_tab = NULL; } static void free_tid_tabs(struct tid_info *t) { free_tid_tab(t); free_stid_tab(t); } static int alloc_tid_tabs(struct tid_info *t) { int rc; rc = alloc_tid_tab(t, M_NOWAIT); if (rc != 0) goto failed; rc = alloc_stid_tab(t, M_NOWAIT); if (rc != 0) goto failed; return (0); failed: free_tid_tabs(t); return (rc); } static inline void alloc_tcb_history(struct adapter *sc, struct tom_data *td) { if (sc->tids.ntids == 0 || sc->tids.ntids > 1024) return; rw_init(&td->tcb_history_lock, "TCB history"); td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history), M_CXGBE, M_ZERO | M_NOWAIT); td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0)); } static inline void free_tcb_history(struct adapter *sc, struct 
tom_data *td) { #ifdef INVARIANTS int i; if (td->tcb_history != NULL) { for (i = 0; i < sc->tids.ntids; i++) { MPASS(td->tcb_history[i] == NULL); } } #endif free(td->tcb_history, M_CXGBE); if (rw_initialized(&td->tcb_history_lock)) rw_destroy(&td->tcb_history_lock); } static void free_tom_data(struct adapter *sc, struct tom_data *td) { ASSERT_SYNCHRONIZED_OP(sc); KASSERT(TAILQ_EMPTY(&td->toep_list), ("%s: TOE PCB list is not empty.", __func__)); KASSERT(td->lctx_count == 0, ("%s: lctx hash table is not empty.", __func__)); t4_free_ppod_region(&td->pr); if (td->listen_mask != 0) hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask); if (mtx_initialized(&td->unsent_wr_lock)) mtx_destroy(&td->unsent_wr_lock); if (mtx_initialized(&td->lctx_hash_lock)) mtx_destroy(&td->lctx_hash_lock); if (mtx_initialized(&td->toep_list_lock)) mtx_destroy(&td->toep_list_lock); free_tcb_history(sc, td); free_tid_tabs(&sc->tids); free(td, M_CXGBE); } static char * prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen, int *buflen) { char *pkt; struct tcphdr *th; int ipv6, len; const int maxlen = max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) + max(sizeof(struct ip), sizeof(struct ip6_hdr)) + sizeof(struct tcphdr); MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN); pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT); if (pkt == NULL) return (NULL); ipv6 = inp->inp_vflag & INP_IPV6; len = 0; if (EVL_VLANOFTAG(vtag) == 0xfff) { struct ether_header *eh = (void *)pkt; if (ipv6) eh->ether_type = htons(ETHERTYPE_IPV6); else eh->ether_type = htons(ETHERTYPE_IP); len += sizeof(*eh); } else { struct ether_vlan_header *evh = (void *)pkt; evh->evl_encap_proto = htons(ETHERTYPE_VLAN); evh->evl_tag = htons(vtag); if (ipv6) evh->evl_proto = htons(ETHERTYPE_IPV6); else evh->evl_proto = htons(ETHERTYPE_IP); len += sizeof(*evh); } if (ipv6) { struct ip6_hdr *ip6 = (void *)&pkt[len]; ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_plen = htons(sizeof(struct 
tcphdr)); ip6->ip6_nxt = IPPROTO_TCP; if (open_type == OPEN_TYPE_ACTIVE) { ip6->ip6_src = inp->in6p_laddr; ip6->ip6_dst = inp->in6p_faddr; } else if (open_type == OPEN_TYPE_LISTEN) { ip6->ip6_src = inp->in6p_laddr; ip6->ip6_dst = ip6->ip6_src; } len += sizeof(*ip6); } else { struct ip *ip = (void *)&pkt[len]; ip->ip_v = IPVERSION; ip->ip_hl = sizeof(*ip) >> 2; ip->ip_tos = inp->inp_ip_tos; ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr)); ip->ip_ttl = inp->inp_ip_ttl; ip->ip_p = IPPROTO_TCP; if (open_type == OPEN_TYPE_ACTIVE) { ip->ip_src = inp->inp_laddr; ip->ip_dst = inp->inp_faddr; } else if (open_type == OPEN_TYPE_LISTEN) { ip->ip_src = inp->inp_laddr; ip->ip_dst = ip->ip_src; } len += sizeof(*ip); } th = (void *)&pkt[len]; if (open_type == OPEN_TYPE_ACTIVE) { th->th_sport = inp->inp_lport; /* network byte order already */ th->th_dport = inp->inp_fport; /* ditto */ } else if (open_type == OPEN_TYPE_LISTEN) { th->th_sport = inp->inp_lport; /* network byte order already */ th->th_dport = th->th_sport; } len += sizeof(th); *pktlen = *buflen = len; return (pkt); } const struct offload_settings * lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m, uint16_t vtag, struct inpcb *inp) { const struct t4_offload_policy *op; char *pkt; struct offload_rule *r; int i, matched, pktlen, buflen; static const struct offload_settings allow_offloading_settings = { .offload = 1, .rx_coalesce = -1, .cong_algo = -1, .sched_class = -1, .tstamp = -1, .sack = -1, .nagle = -1, .ecn = -1, .ddp = -1, .tls = -1, .txq = -1, .rxq = -1, .mss = -1, }; static const struct offload_settings disallow_offloading_settings = { .offload = 0, /* rest is irrelevant when offload is off. */ }; rw_assert(&sc->policy_lock, RA_LOCKED); /* * If there's no Connection Offloading Policy attached to the device * then we need to return a default static policy. If * "cop_managed_offloading" is true, then we need to disallow * offloading until a COP is attached to the device. 
Otherwise we * allow offloading ... */ op = sc->policy; if (op == NULL) { if (sc->tt.cop_managed_offloading) return (&disallow_offloading_settings); else return (&allow_offloading_settings); } switch (open_type) { case OPEN_TYPE_ACTIVE: case OPEN_TYPE_LISTEN: pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen); break; case OPEN_TYPE_PASSIVE: MPASS(m != NULL); pkt = mtod(m, char *); MPASS(*pkt == CPL_PASS_ACCEPT_REQ); pkt += sizeof(struct cpl_pass_accept_req); pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req); buflen = m->m_len - sizeof(struct cpl_pass_accept_req); break; default: MPASS(0); return (&disallow_offloading_settings); } if (pkt == NULL || pktlen == 0 || buflen == 0) return (&disallow_offloading_settings); matched = 0; r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { if (r->open_type != open_type && r->open_type != OPEN_TYPE_DONTCARE) { continue; } matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen); if (matched) break; } if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN) free(pkt, M_CXGBE); return (matched ? 
&r->settings : &disallow_offloading_settings); } static void reclaim_wr_resources(void *arg, int count) { struct tom_data *td = arg; STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list); struct cpl_act_open_req *cpl; u_int opcode, atid, tid; struct wrqe *wr; struct adapter *sc = td_adapter(td); mtx_lock(&td->unsent_wr_lock); STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe); mtx_unlock(&td->unsent_wr_lock); while ((wr = STAILQ_FIRST(&twr_list)) != NULL) { STAILQ_REMOVE_HEAD(&twr_list, link); cpl = wrtod(wr); opcode = GET_OPCODE(cpl); switch (opcode) { case CPL_ACT_OPEN_REQ: case CPL_ACT_OPEN_REQ6: atid = G_TID_TID(be32toh(OPCODE_TID(cpl))); CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid); act_open_failure_cleanup(sc, atid, EHOSTUNREACH); free(wr, M_CXGBE); break; case CPL_PASS_ACCEPT_RPL: tid = GET_TID(cpl); CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid); synack_failure_cleanup(sc, tid); free(wr, M_CXGBE); break; default: log(LOG_ERR, "%s: leaked work request %p, wr_len %d, " "opcode %x\n", __func__, wr, wr->wr_len, opcode); /* WR not freed here; go look at it with a debugger. 
*/ } } } /* * Ground control to Major TOM * Commencing countdown, engines on */ static int t4_tom_activate(struct adapter *sc) { struct tom_data *td; struct toedev *tod; struct vi_info *vi; int i, rc, v; ASSERT_SYNCHRONIZED_OP(sc); /* per-adapter softc for TOM */ td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT); if (td == NULL) return (ENOMEM); /* List of TOE PCBs and associated lock */ mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF); TAILQ_INIT(&td->toep_list); /* Listen context */ mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF); td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE, &td->listen_mask, HASH_NOWAIT); /* List of WRs for which L2 resolution failed */ mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF); STAILQ_INIT(&td->unsent_wr_list); TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td); /* TID tables */ rc = alloc_tid_tabs(&sc->tids); if (rc != 0) goto done; rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp, t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods"); if (rc != 0) goto done; t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK, V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask); alloc_tcb_history(sc, td); /* toedev ops */ tod = &td->tod; init_toedev(tod); tod->tod_softc = sc; tod->tod_connect = t4_connect; tod->tod_listen_start = t4_listen_start; tod->tod_listen_stop = t4_listen_stop; tod->tod_rcvd = t4_rcvd; tod->tod_output = t4_tod_output; tod->tod_send_rst = t4_send_rst; tod->tod_send_fin = t4_send_fin; tod->tod_pcb_detach = t4_pcb_detach; tod->tod_l2_update = t4_l2_update; tod->tod_syncache_added = t4_syncache_added; tod->tod_syncache_removed = t4_syncache_removed; tod->tod_syncache_respond = t4_syncache_respond; tod->tod_offload_socket = t4_offload_socket; tod->tod_ctloutput = t4_ctloutput; tod->tod_tcp_info = t4_tcp_info; #ifdef KERN_TLS tod->tod_alloc_tls_session = t4_alloc_tls_session; #endif for_each_port(sc, i) { for_each_vi(sc->port[i], v, vi) { TOEDEV(vi->ifp) = &td->tod; } 
} sc->tom_softc = td; register_toedev(sc->tom_softc); done: if (rc != 0) free_tom_data(sc, td); return (rc); } static int t4_tom_deactivate(struct adapter *sc) { int rc = 0; struct tom_data *td = sc->tom_softc; ASSERT_SYNCHRONIZED_OP(sc); if (td == NULL) return (0); /* XXX. KASSERT? */ if (sc->offload_map != 0) return (EBUSY); /* at least one port has IFCAP_TOE enabled */ if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI)) return (EBUSY); /* both iWARP and iSCSI rely on the TOE. */ mtx_lock(&td->toep_list_lock); if (!TAILQ_EMPTY(&td->toep_list)) rc = EBUSY; mtx_unlock(&td->toep_list_lock); mtx_lock(&td->lctx_hash_lock); if (td->lctx_count > 0) rc = EBUSY; mtx_unlock(&td->lctx_hash_lock); taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources); mtx_lock(&td->unsent_wr_lock); if (!STAILQ_EMPTY(&td->unsent_wr_list)) rc = EBUSY; mtx_unlock(&td->unsent_wr_lock); if (rc == 0) { unregister_toedev(sc->tom_softc); free_tom_data(sc, td); sc->tom_softc = NULL; } return (rc); } static int t4_aio_queue_tom(struct socket *so, struct kaiocb *job) { struct tcpcb *tp = so_sototcpcb(so); struct toepcb *toep = tp->t_toe; int error; if (ulp_mode(toep) == ULP_MODE_TCPDDP) { error = t4_aio_queue_ddp(so, job); if (error != EOPNOTSUPP) return (error); } return (t4_aio_queue_aiotx(so, job)); } static int t4_ctloutput_tom(struct socket *so, struct sockopt *sopt) { if (sopt->sopt_level != IPPROTO_TCP) return (tcp_ctloutput(so, sopt)); switch (sopt->sopt_name) { case TCP_TLSOM_SET_TLS_CONTEXT: case TCP_TLSOM_GET_TLS_TOM: case TCP_TLSOM_CLR_TLS_TOM: case TCP_TLSOM_CLR_QUIES: return (t4_ctloutput_tls(so, sopt)); default: return (tcp_ctloutput(so, sopt)); } } static int t4_tom_mod_load(void) { struct protosw *tcp_protosw, *tcp6_protosw; /* CPL handlers */ t4_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl); t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2, CPL_COOKIE_TOM); t4_init_connect_cpl_handlers(); t4_init_listen_cpl_handlers(); 
t4_init_cpl_io_handlers(); t4_ddp_mod_load(); t4_tls_mod_load(); tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM); if (tcp_protosw == NULL) return (ENOPROTOOPT); bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw)); bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs)); toe_usrreqs.pru_aio_queue = t4_aio_queue_tom; toe_protosw.pr_ctloutput = t4_ctloutput_tom; toe_protosw.pr_usrreqs = &toe_usrreqs; tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM); if (tcp6_protosw == NULL) return (ENOPROTOOPT); bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw)); bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs)); toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom; toe6_protosw.pr_ctloutput = t4_ctloutput_tom; toe6_protosw.pr_usrreqs = &toe6_usrreqs; return (t4_register_uld(&tom_uld_info)); } static void tom_uninit(struct adapter *sc, void *arg __unused) { if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun")) return; /* Try to free resources (works only if no port has IFCAP_TOE) */ if (uld_active(sc, ULD_TOM)) t4_deactivate_uld(sc, ULD_TOM); end_synchronized_op(sc, 0); } static int t4_tom_mod_unload(void) { t4_iterate(tom_uninit, NULL); if (t4_unregister_uld(&tom_uld_info) == EBUSY) return (EBUSY); t4_tls_mod_unload(); t4_ddp_mod_unload(); t4_uninit_connect_cpl_handlers(); t4_uninit_listen_cpl_handlers(); t4_uninit_cpl_io_handlers(); t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM); t4_register_cpl_handler(CPL_GET_TCB_RPL, NULL); return (0); } #endif /* TCP_OFFLOAD */ static int t4_tom_modevent(module_t mod, int cmd, void *arg) { int rc = 0; #ifdef TCP_OFFLOAD switch (cmd) { case MOD_LOAD: rc = t4_tom_mod_load(); break; case MOD_UNLOAD: rc = t4_tom_mod_unload(); break; default: rc = EINVAL; } #else printf("t4_tom: compiled without TCP_OFFLOAD support.\n"); rc = EOPNOTSUPP; #endif return (rc); } static moduledata_t t4_tom_moddata= { "t4_tom", t4_tom_modevent, 0 }; MODULE_VERSION(t4_tom, 1); 
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1); MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1); DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);