Index: stable/9/sys/dev/qlxgbe/ql_dbg.h =================================================================== --- stable/9/sys/dev/qlxgbe/ql_dbg.h (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_dbg.h (revision 330557) @@ -1,99 +1,103 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File : ql_dbg.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
*/ #ifndef _QL_DBG_H_ #define _QL_DBG_H_ extern void ql_dump_buf8(qla_host_t *ha, const char *str, void *dbuf, uint32_t len); extern void ql_dump_buf16(qla_host_t *ha, const char *str, void *dbuf, uint32_t len16); extern void ql_dump_buf32(qla_host_t *ha, const char *str, void *dbuf, uint32_t len32); -#define INJCT_RX_RXB_INVAL 0x00001 -#define INJCT_RX_MP_NULL 0x00002 -#define INJCT_LRO_RXB_INVAL 0x00003 -#define INJCT_LRO_MP_NULL 0x00004 -#define INJCT_NUM_HNDLE_INVALID 0x00005 -#define INJCT_RDWR_INDREG_FAILURE 0x00006 -#define INJCT_RDWR_OFFCHIPMEM_FAILURE 0x00007 -#define INJCT_MBX_CMD_FAILURE 0x00008 -#define INJCT_HEARTBEAT_FAILURE 0x00009 -#define INJCT_TEMPERATURE_FAILURE 0x0000A -#define INJCT_M_GETCL_M_GETJCL_FAILURE 0x0000B +#define INJCT_RX_RXB_INVAL 0x00001 +#define INJCT_RX_MP_NULL 0x00002 +#define INJCT_LRO_RXB_INVAL 0x00003 +#define INJCT_LRO_MP_NULL 0x00004 +#define INJCT_NUM_HNDLE_INVALID 0x00005 +#define INJCT_RDWR_INDREG_FAILURE 0x00006 +#define INJCT_RDWR_OFFCHIPMEM_FAILURE 0x00007 +#define INJCT_MBX_CMD_FAILURE 0x00008 +#define INJCT_HEARTBEAT_FAILURE 0x00009 +#define INJCT_TEMPERATURE_FAILURE 0x0000A +#define INJCT_M_GETCL_M_GETJCL_FAILURE 0x0000B +#define INJCT_INV_CONT_OPCODE 0x0000C +#define INJCT_SGL_RCV_INV_DESC_COUNT 0x0000D +#define INJCT_SGL_LRO_INV_DESC_COUNT 0x0000E +#define INJCT_PEER_PORT_FAILURE_ERR_RECOVERY 0x0000F #ifdef QL_DBG #define QL_DPRINT1(ha, x) if (ha->dbg_level & 0x0001) device_printf x #define QL_DPRINT2(ha, x) if (ha->dbg_level & 0x0002) device_printf x #define QL_DPRINT4(ha, x) if (ha->dbg_level & 0x0004) device_printf x #define QL_DPRINT8(ha, x) if (ha->dbg_level & 0x0008) device_printf x #define QL_DPRINT10(ha, x) if (ha->dbg_level & 0x0010) device_printf x #define QL_DPRINT20(ha, x) if (ha->dbg_level & 0x0020) device_printf x #define QL_DPRINT40(ha, x) if (ha->dbg_level & 0x0040) device_printf x #define QL_DPRINT80(ha, x) if (ha->dbg_level & 0x0080) device_printf x #define QL_DUMP_BUFFER8(h, s, b, n) if 
(h->dbg_level & 0x08000000)\ qla_dump_buf8(h, s, b, n) #define QL_DUMP_BUFFER16(h, s, b, n) if (h->dbg_level & 0x08000000)\ qla_dump_buf16(h, s, b, n) #define QL_DUMP_BUFFER32(h, s, b, n) if (h->dbg_level & 0x08000000)\ qla_dump_buf32(h, s, b, n) #define QL_ASSERT(ha, x, y) if (!x && !ha->err_inject) panic y #define QL_ERR_INJECT(ha, val) (ha->err_inject == val) #else #define QL_DPRINT1(ha, x) #define QL_DPRINT2(ha, x) #define QL_DPRINT4(ha, x) #define QL_DPRINT8(ha, x) #define QL_DPRINT10(ha, x) #define QL_DPRINT20(ha, x) #define QL_DPRINT40(ha, x) #define QL_DPRINT80(ha, x) #define QL_DUMP_BUFFER8(h, s, b, n) #define QL_DUMP_BUFFER16(h, s, b, n) #define QL_DUMP_BUFFER32(h, s, b, n) #define QL_ASSERT(ha, x, y) #define QL_ERR_INJECT(ha, val) 0 #endif #endif /* #ifndef _QL_DBG_H_ */ Index: stable/9/sys/dev/qlxgbe/ql_def.h =================================================================== --- stable/9/sys/dev/qlxgbe/ql_def.h (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_def.h (revision 330557) @@ -1,276 +1,282 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: ql_def.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #ifndef _QL_DEF_H_ #define _QL_DEF_H_ #define BIT_0 (0x1 << 0) #define BIT_1 (0x1 << 1) #define BIT_2 (0x1 << 2) #define BIT_3 (0x1 << 3) #define BIT_4 (0x1 << 4) #define BIT_5 (0x1 << 5) #define BIT_6 (0x1 << 6) #define BIT_7 (0x1 << 7) #define BIT_8 (0x1 << 8) #define BIT_9 (0x1 << 9) #define BIT_10 (0x1 << 10) #define BIT_11 (0x1 << 11) #define BIT_12 (0x1 << 12) #define BIT_13 (0x1 << 13) #define BIT_14 (0x1 << 14) #define BIT_15 (0x1 << 15) #define BIT_16 (0x1 << 16) #define BIT_17 (0x1 << 17) #define BIT_18 (0x1 << 18) #define BIT_19 (0x1 << 19) #define BIT_20 (0x1 << 20) #define BIT_21 (0x1 << 21) #define BIT_22 (0x1 << 22) #define BIT_23 (0x1 << 23) #define BIT_24 (0x1 << 24) #define BIT_25 (0x1 << 25) #define BIT_26 (0x1 << 26) #define BIT_27 (0x1 << 27) #define BIT_28 (0x1 << 28) #define BIT_29 (0x1 << 29) #define BIT_30 (0x1 << 30) #define BIT_31 (0x1 << 31) struct qla_rx_buf { struct mbuf *m_head; bus_dmamap_t map; bus_addr_t paddr; uint32_t handle; void *next; }; typedef struct qla_rx_buf qla_rx_buf_t; struct qla_rx_ring { qla_rx_buf_t rx_buf[NUM_RX_DESCRIPTORS]; }; typedef struct qla_rx_ring qla_rx_ring_t; struct qla_tx_buf { struct mbuf *m_head; bus_dmamap_t map; }; typedef struct qla_tx_buf qla_tx_buf_t; #define QLA_MAX_SEGMENTS 62 /* maximum # of segs in a sg list */ #define QLA_MAX_MTU 9000 #define QLA_STD_FRAME_SIZE 
1514 #define QLA_MAX_TSO_FRAME_SIZE ((64 * 1024 - 1) + 22) /* Number of MSIX/MSI Vectors required */ struct qla_ivec { uint32_t sds_idx; void *ha; struct resource *irq; void *handle; int irq_rid; }; typedef struct qla_ivec qla_ivec_t; #define QLA_WATCHDOG_CALLOUT_TICKS 2 typedef struct _qla_tx_ring { qla_tx_buf_t tx_buf[NUM_TX_DESCRIPTORS]; uint64_t count; uint64_t iscsi_pkt_count; } qla_tx_ring_t; typedef struct _qla_tx_fp { struct mtx tx_mtx; char tx_mtx_name[32]; struct buf_ring *tx_br; struct task fp_task; struct taskqueue *fp_taskqueue; void *ha; uint32_t txr_idx; } qla_tx_fp_t; /* * Adapter structure contains the hardware independant information of the * pci function. */ struct qla_host { volatile struct { volatile uint32_t qla_callout_init :1, qla_watchdog_active :1, parent_tag :1, lock_init :1; } flags; volatile uint32_t qla_interface_up; volatile uint32_t stop_rcv; volatile uint32_t qla_watchdog_exit; volatile uint32_t qla_watchdog_exited; volatile uint32_t qla_watchdog_pause; volatile uint32_t qla_watchdog_paused; volatile uint32_t qla_initiate_recovery; volatile uint32_t qla_detach_active; + volatile uint32_t offline; device_t pci_dev; - uint16_t watchdog_ticks; + volatile uint16_t watchdog_ticks; uint8_t pci_func; - uint8_t resvd; /* ioctl related */ struct cdev *ioctl_dev; /* register mapping */ struct resource *pci_reg; int reg_rid; struct resource *pci_reg1; int reg_rid1; /* interrupts */ struct resource *mbx_irq; void *mbx_handle; int mbx_irq_rid; int msix_count; qla_ivec_t irq_vec[MAX_SDS_RINGS]; /* parent dma tag */ bus_dma_tag_t parent_tag; /* interface to o.s */ struct ifnet *ifp; struct ifmedia media; uint16_t max_frame_size; uint16_t rsrvd0; int if_flags; /* hardware access lock */ + struct mtx sp_log_lock; struct mtx hw_lock; volatile uint32_t hw_lock_held; uint64_t hw_lock_failed; /* transmit and receive buffers */ uint32_t txr_idx; /* index of the current tx ring */ qla_tx_ring_t tx_ring[NUM_TX_RINGS]; bus_dma_tag_t tx_tag; struct callout 
tx_callout; qla_tx_fp_t tx_fp[MAX_SDS_RINGS]; qla_rx_ring_t rx_ring[MAX_RDS_RINGS]; bus_dma_tag_t rx_tag; uint32_t std_replenish; qla_rx_buf_t *rxb_free; uint32_t rxb_free_count; /* stats */ uint32_t err_m_getcl; uint32_t err_m_getjcl; uint32_t err_tx_dmamap_create; uint32_t err_tx_dmamap_load; uint32_t err_tx_defrag; uint64_t rx_frames; uint64_t rx_bytes; uint64_t lro_pkt_count; uint64_t lro_bytes; uint64_t ipv4_lro; uint64_t ipv6_lro; uint64_t tx_frames; uint64_t tx_bytes; uint64_t tx_tso_frames; uint64_t hw_vlan_tx_frames; struct task stats_task; struct taskqueue *stats_tq; uint32_t fw_ver_major; uint32_t fw_ver_minor; uint32_t fw_ver_sub; uint32_t fw_ver_build; /* hardware specific */ qla_hw_t hw; /* debug stuff */ volatile const char *qla_lock; volatile const char *qla_unlock; uint32_t dbg_level; uint32_t enable_minidump; + uint32_t enable_driverstate_dump; + uint32_t enable_error_recovery; + uint32_t ms_delay_after_init; uint8_t fw_ver_str[32]; /* Error Injection Related */ uint32_t err_inject; struct task err_task; struct taskqueue *err_tq; /* Async Event Related */ uint32_t async_event; struct task async_event_task; struct taskqueue *async_event_tq; /* Peer Device */ device_t peer_dev; volatile uint32_t msg_from_peer; #define QL_PEER_MSG_RESET 0x01 #define QL_PEER_MSG_ACK 0x02 }; typedef struct qla_host qla_host_t; /* note that align has to be a power of 2 */ #define QL_ALIGN(size, align) (((size) + ((align) - 1)) & (~((align) - 1))); #define QL_MIN(x, y) ((x < y) ? x : y) #define QL_RUNNING(ifp) (ifp->if_drv_flags & IFF_DRV_RUNNING) /* Return 0, if identical, else 1 */ #define QL_MAC_CMP(mac1, mac2) \ ((((*(uint32_t *) mac1) == (*(uint32_t *) mac2) && \ (*(uint16_t *)(mac1 + 4)) == (*(uint16_t *)(mac2 + 4)))) ? 
0 : 1) + +#define QL_INITIATE_RECOVERY(ha) qla_set_error_recovery(ha) #endif /* #ifndef _QL_DEF_H_ */ Index: stable/9/sys/dev/qlxgbe/ql_glbl.h =================================================================== --- stable/9/sys/dev/qlxgbe/ql_glbl.h (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_glbl.h (revision 330557) @@ -1,119 +1,126 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: ql_glbl.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. * Content: Contains prototypes of the exported functions from each file. 
*/ #ifndef _QL_GLBL_H_ #define _QL_GLBL_H_ /* * from ql_isr.c */ extern void ql_mbx_isr(void *arg); extern void ql_isr(void *arg); extern uint32_t ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count); /* * from ql_os.c */ extern int ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf); extern void ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf); extern int ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp); +extern void qla_set_error_recovery(qla_host_t *ha); /* * from ql_hw.c */ extern int ql_alloc_dma(qla_host_t *ha); extern void ql_free_dma(qla_host_t *ha); extern void ql_hw_add_sysctls(qla_host_t *ha); extern int ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu); extern void qla_confirm_9kb_enable(qla_host_t *ha); extern int ql_init_hw_if(qla_host_t *ha); extern int ql_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt, uint32_t add_multi); extern void ql_del_hw_if(qla_host_t *ha); extern int ql_set_promisc(qla_host_t *ha); extern void qla_reset_promisc(qla_host_t *ha); extern int ql_set_allmulti(qla_host_t *ha); extern void qla_reset_allmulti(qla_host_t *ha); extern void ql_update_link_state(qla_host_t *ha); extern void ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx); extern int ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id); extern void ql_get_stats(qla_host_t *ha); extern void ql_hw_link_status(qla_host_t *ha); extern int ql_hw_check_health(qla_host_t *ha); extern void qla_hw_async_event(qla_host_t *ha); extern int qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb, uint32_t *num_rcvq); extern int qla_hw_del_all_mcast(qla_host_t *ha); extern int ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp); extern void ql_minidump(qla_host_t *ha); extern int ql_minidump_init(qla_host_t *ha); /* * from ql_misc.c */ extern int ql_init_hw(qla_host_t *ha); extern int ql_rdwr_indreg32(qla_host_t *ha, uint32_t addr, uint32_t *val, 
uint32_t rd); extern int ql_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data); extern int ql_rdwr_offchip_mem(qla_host_t *ha, uint64_t addr, q80_offchip_mem_val_t *val, uint32_t rd); extern void ql_read_mac_addr(qla_host_t *ha); extern int ql_erase_flash(qla_host_t *ha, uint32_t off, uint32_t size); extern int ql_wr_flash_buffer(qla_host_t *ha, uint32_t off, uint32_t size, void *buf); extern int ql_stop_sequence(qla_host_t *ha); extern int ql_start_sequence(qla_host_t *ha, uint16_t index); /* * from ql_ioctl.c */ extern int ql_make_cdev(qla_host_t *ha); extern void ql_del_cdev(qla_host_t *ha); extern unsigned char ql83xx_firmware[]; extern unsigned int ql83xx_firmware_len; extern unsigned char ql83xx_bootloader[]; extern unsigned int ql83xx_bootloader_len; extern unsigned char ql83xx_resetseq[]; extern unsigned int ql83xx_resetseq_len; extern unsigned char ql83xx_minidump[]; extern unsigned int ql83xx_minidump_len; extern void ql_alloc_drvr_state_buffer(qla_host_t *ha); extern void ql_free_drvr_state_buffer(qla_host_t *ha); extern void ql_capture_drvr_state(qla_host_t *ha); +extern void ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params, + uint32_t param0, uint32_t param1, uint32_t param2, + uint32_t param3, uint32_t param4); +extern void ql_alloc_sp_log_buffer(qla_host_t *ha); +extern void ql_free_sp_log_buffer(qla_host_t *ha); + #endif /* #ifndef_QL_GLBL_H_ */ Index: stable/9/sys/dev/qlxgbe/ql_hw.c =================================================================== --- stable/9/sys/dev/qlxgbe/ql_hw.c (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_hw.c (revision 330557) @@ -1,5442 +1,5667 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_hw.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
* Content: Contains Hardware dependant functions */ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_ver.h" #include "ql_glbl.h" #include "ql_dbg.h" #include "ql_minidump.h" /* * Static Functions */ static void qla_del_rcv_cntxt(qla_host_t *ha); static int qla_init_rcv_cntxt(qla_host_t *ha); -static void qla_del_xmt_cntxt(qla_host_t *ha); +static int qla_del_xmt_cntxt(qla_host_t *ha); static int qla_init_xmt_cntxt(qla_host_t *ha); static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause); static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, uint32_t create); static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id); static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, int rcv); static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode); static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id); static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr); static int qla_hw_add_all_mcast(qla_host_t *ha); static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds); static int qla_init_nic_func(qla_host_t *ha); static int qla_stop_nic_func(qla_host_t *ha); static int qla_query_fw_dcbx_caps(qla_host_t *ha); static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits); static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits); static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode); static int qla_get_cam_search_mode(qla_host_t *ha); static void ql_minidump_free(qla_host_t *ha); #ifdef QL_DBG static void qla_stop_pegs(qla_host_t *ha) { uint32_t val = 1; ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, 
&val, 0); device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__); } static int qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { qla_stop_pegs(ha); QLA_UNLOCK(ha, __func__); } } return err; } #endif /* #ifdef QL_DBG */ static int qla_validate_set_port_cfg_bit(uint32_t bits) { if ((bits & 0xF) > 1) return (-1); if (((bits >> 4) & 0xF) > 2) return (-1); if (((bits >> 8) & 0xF) > 2) return (-1); return (0); } static int qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; uint32_t cfg_bits; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) { err = qla_get_port_config(ha, &cfg_bits); if (err) goto qla_sysctl_set_port_cfg_exit; if (ret & 0x1) { cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE; } else { cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE; } ret = ret >> 4; cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK; if ((ret & 0xF) == 0) { cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED; } else if ((ret & 0xF) == 1){ cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD; } else { cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM; } ret = ret >> 4; cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK; if (ret == 0) { cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV; } else if (ret == 1){ cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT; } else { cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV; } if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_set_port_config(ha, cfg_bits); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } } else { if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_get_port_config(ha, &cfg_bits); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: 
failed\n", __func__); } } qla_sysctl_set_port_cfg_exit: return err; } static int qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) || (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) { if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_set_cam_search_mode(ha, (uint32_t)ret); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } } else { device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret); } return (err); } static int qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_get_cam_search_mode(ha); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } return (err); } static void qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *ctx_oid; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac", CTLFLAG_RD, NULL, "stats_hw_mac"); children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_frames", CTLFLAG_RD, &ha->hw.mac.xmt_frames, "xmt_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_bytes", CTLFLAG_RD, &ha->hw.mac.xmt_bytes, "xmt_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_mcast_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts, "xmt_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_bcast_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts, "xmt_bcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pause_frames", 
CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames, "xmt_pause_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_cntrl_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts, "xmt_cntrl_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_64bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes, "xmt_pkt_lt_64bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_127bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes, "xmt_pkt_lt_127bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_255bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes, "xmt_pkt_lt_255bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_511bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes, "xmt_pkt_lt_511bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_1023bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes, "xmt_pkt_lt_1023bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_1518bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes, "xmt_pkt_lt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_gt_1518bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes, "xmt_pkt_gt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_frames", CTLFLAG_RD, &ha->hw.mac.rcv_frames, "rcv_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_bytes", CTLFLAG_RD, &ha->hw.mac.rcv_bytes, "rcv_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_mcast_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts, "rcv_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_bcast_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts, "rcv_bcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pause_frames", CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames, "rcv_pause_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_cntrl_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts, "rcv_cntrl_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_64bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes, "rcv_pkt_lt_64bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_127bytes", 
CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes, "rcv_pkt_lt_127bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_255bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes, "rcv_pkt_lt_255bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_511bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes, "rcv_pkt_lt_511bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_1023bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes, "rcv_pkt_lt_1023bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_1518bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes, "rcv_pkt_lt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_gt_1518bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes, "rcv_pkt_gt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_error", CTLFLAG_RD, &ha->hw.mac.rcv_len_error, "rcv_len_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_small", CTLFLAG_RD, &ha->hw.mac.rcv_len_small, "rcv_len_small"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_large", CTLFLAG_RD, &ha->hw.mac.rcv_len_large, "rcv_len_large"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_jabber", CTLFLAG_RD, &ha->hw.mac.rcv_jabber, "rcv_jabber"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_dropped", CTLFLAG_RD, &ha->hw.mac.rcv_dropped, "rcv_dropped"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "fcs_error", CTLFLAG_RD, &ha->hw.mac.fcs_error, "fcs_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "align_error", CTLFLAG_RD, &ha->hw.mac.align_error, "align_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_frames, "eswitched_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_bytes", CTLFLAG_RD, &ha->hw.mac.eswitched_bytes, "eswitched_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_mcast_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames, "eswitched_mcast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_bcast_frames", CTLFLAG_RD, 
&ha->hw.mac.eswitched_bcast_frames, "eswitched_bcast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_ucast_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames, "eswitched_ucast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_err_free_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames, "eswitched_err_free_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_err_free_bytes", CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes, "eswitched_err_free_bytes"); return; } static void qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *ctx_oid; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv", CTLFLAG_RD, NULL, "stats_hw_rcv"); children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "total_bytes", CTLFLAG_RD, &ha->hw.rcv.total_bytes, "total_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "total_pkts", CTLFLAG_RD, &ha->hw.rcv.total_pkts, "total_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_pkt_count", CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count, "lro_pkt_count"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "sw_pkt_count", CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count, "sw_pkt_count"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "ip_chksum_err", CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err, "ip_chksum_err"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_wo_acntxts", CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts, "pkts_wo_acntxts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_sds_card", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card, "pkts_dropped_no_sds_card"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_sds_host", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host, "pkts_dropped_no_sds_host"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "oversized_pkts", CTLFLAG_RD, &ha->hw.rcv.oversized_pkts, 
"oversized_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_rds", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds, "pkts_dropped_no_rds"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "unxpctd_mcast_pkts", CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts, "unxpctd_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "re1_fbq_error", CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error, "re1_fbq_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "invalid_mac_addr", CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr, "invalid_mac_addr"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rds_prime_trys", CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys, "rds_prime_trys"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rds_prime_success", CTLFLAG_RD, &ha->hw.rcv.rds_prime_success, "rds_prime_success"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_added", CTLFLAG_RD, &ha->hw.rcv.lro_flows_added, "lro_flows_added"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_deleted", CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted, "lro_flows_deleted"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_active", CTLFLAG_RD, &ha->hw.rcv.lro_flows_active, "lro_flows_active"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_droped_unknown", CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown, "pkts_droped_unknown"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_cnt_oversized", CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized, "pkts_cnt_oversized"); return; } static void qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt", CTLFLAG_RD, NULL, "stats_hw_xmt"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, 
sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); /* Tx Related */ SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "total_bytes", CTLFLAG_RD, &ha->hw.xmt[i].total_bytes, "total_bytes"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "total_pkts", CTLFLAG_RD, &ha->hw.xmt[i].total_pkts, "total_pkts"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "errors", CTLFLAG_RD, &ha->hw.xmt[i].errors, "errors"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "pkts_dropped", CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped, "pkts_dropped"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "switch_pkts", CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts, "switch_pkts"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "num_buffers", CTLFLAG_RD, &ha->hw.xmt[i].num_buffers, "num_buffers"); } return; } static void +qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha) +{ + struct sysctl_ctx_list *ctx; + struct sysctl_oid_list *node_children; + + ctx = device_get_sysctl_ctx(ha->pci_dev); + node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_lt_200ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0], + "mbx_completion_time_lt_200ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_200ms_400ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1], + "mbx_completion_time_200ms_400ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_400ms_600ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2], + "mbx_completion_time_400ms_600ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_600ms_800ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3], + "mbx_completion_time_600ms_800ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_800ms_1000ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4], + "mbx_completion_time_800ms_1000ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, 
+ OID_AUTO, "mbx_completion_time_1000ms_1200ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5], + "mbx_completion_time_1000ms_1200ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_1200ms_1400ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6], + "mbx_completion_time_1200ms_1400ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_1400ms_1600ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7], + "mbx_completion_time_1400ms_1600ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_1600ms_1800ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8], + "mbx_completion_time_1600ms_1800ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_1800ms_2000ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9], + "mbx_completion_time_1800ms_2000ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_2000ms_2200ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10], + "mbx_completion_time_2000ms_2200ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_2200ms_2400ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11], + "mbx_completion_time_2200ms_2400ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_2400ms_2600ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12], + "mbx_completion_time_2400ms_2600ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_2600ms_2800ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13], + "mbx_completion_time_2600ms_2800ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_2800ms_3000ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14], + "mbx_completion_time_2800ms_3000ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_3000ms_4000ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15], + "mbx_completion_time_3000ms_4000ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_time_4000ms_5000ms", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16], + 
"mbx_completion_time_4000ms_5000ms"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17], + "mbx_completion_host_mbx_cntrl_timeout"); + + SYSCTL_ADD_QUAD(ctx, node_children, + OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout", + CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18], + "mbx_completion_fw_mbx_cntrl_timeout"); + return; +} + +static void qlnx_add_hw_stats_sysctls(qla_host_t *ha) { qlnx_add_hw_mac_stats_sysctls(ha); qlnx_add_hw_rcv_stats_sysctls(ha); qlnx_add_hw_xmt_stats_sysctls(ha); + qlnx_add_hw_mbx_cmpl_stats_sysctls(ha); return; } static void qlnx_add_drvr_sds_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds", CTLFLAG_RD, NULL, "stats_drvr_sds"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_sds_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "intr_count", CTLFLAG_RD, &ha->hw.sds[i].intr_count, "intr_count"); SYSCTL_ADD_UINT(ctx, node_children, OID_AUTO, "rx_free", CTLFLAG_RD, &ha->hw.sds[i].rx_free, ha->hw.sds[i].rx_free, "rx_free"); } return; } static void qlnx_add_drvr_rds_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, 
"stats_drvr_rds", CTLFLAG_RD, NULL, "stats_drvr_rds"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_rds_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "count", CTLFLAG_RD, &ha->hw.rds[i].count, "count"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_pkt_count", CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count, "lro_pkt_count"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_bytes", CTLFLAG_RD, &ha->hw.rds[i].lro_bytes, "lro_bytes"); } return; } static void qlnx_add_drvr_tx_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt", CTLFLAG_RD, NULL, "stats_drvr_xmt"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "count", CTLFLAG_RD, &ha->tx_ring[i].count, "count"); #ifdef QL_ENABLE_ISCSI_TLV SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "iscsi_pkt_count", CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count, "iscsi_pkt_count"); #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ } return; } static void qlnx_add_drvr_stats_sysctls(qla_host_t *ha) { qlnx_add_drvr_sds_stats(ha); qlnx_add_drvr_rds_stats(ha); qlnx_add_drvr_tx_stats(ha); return; } /* * Name: ql_hw_add_sysctls * Function: Add P3Plus specific sysctls */ void 
ql_hw_add_sysctls(qla_host_t *ha) { device_t dev; dev = ha->pci_dev; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings, ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings, ha->hw.num_sds_rings, "Number of Status Descriptor Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings, ha->hw.num_tx_rings, "Number of Transmit Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx, ha->txr_idx, "Tx Ring Used"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs, ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt"); ha->hw.sds_cidx_thres = 32; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres, ha->hw.sds_cidx_thres, "Number of SDS entries to process before updating" " SDS Ring Consumer Index"); ha->hw.rds_pidx_thres = 32; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres, ha->hw.rds_pidx_thres, "Number of Rcv Rings Entries to post before updating" " RDS Ring Producer Index"); ha->hw.rcv_intr_coalesce = (3 << 16) | 256; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW, &ha->hw.rcv_intr_coalesce, ha->hw.rcv_intr_coalesce, "Rcv Intr Coalescing Parameters\n" "\tbits 15:0 max packets\n" "\tbits 31:16 max micro-seconds to wait\n" "\tplease run\n" 
"\tifconfig down && ifconfig up\n" "\tto take effect \n"); ha->hw.xmt_intr_coalesce = (64 << 16) | 64; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW, &ha->hw.xmt_intr_coalesce, ha->hw.xmt_intr_coalesce, "Xmt Intr Coalescing Parameters\n" "\tbits 15:0 max packets\n" "\tbits 31:16 max micro-seconds to wait\n" "\tplease run\n" "\tifconfig down && ifconfig up\n" "\tto take effect \n"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW, (void *)ha, 0, qla_sysctl_port_cfg, "I", "Set Port Configuration if values below " "otherwise Get Port Configuration\n" "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n" "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n" "\tBits 8-11: std pause cfg; 0 = xmt and rcv;" " 1 = xmt only; 2 = rcv only;\n" ); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, (void *)ha, 0, qla_sysctl_set_cam_search_mode, "I", "Set CAM Search Mode" "\t 1 = search mode internal\n" "\t 2 = search mode auto\n"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, (void *)ha, 0, qla_sysctl_get_cam_search_mode, "I", "Get CAM Search Mode" "\t 1 = search mode internal\n" "\t 2 = search mode auto\n"); ha->hw.enable_9kb = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb, ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000"); ha->hw.enable_hw_lro = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro, ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n" "\t 1 : Hardware LRO if LRO is enabled\n" 
"\t 0 : Software LRO if LRO is enabled\n" "\t Any change requires ifconfig down/up to take effect\n" "\t Note that LRO may be turned off/on via ifconfig\n"); + SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index, + ha->hw.sp_log_index, "sp_log_index"); + + SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop, + ha->hw.sp_log_stop, "sp_log_stop"); + + ha->hw.sp_log_stop_events = 0; + + SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "sp_log_stop_events", CTLFLAG_RW, + &ha->hw.sp_log_stop_events, + ha->hw.sp_log_stop_events, "Slow path event log is stopped" + " when OR of the following events occur \n" + "\t 0x01 : Heart beat Failure\n" + "\t 0x02 : Temperature Failure\n" + "\t 0x04 : HW Initialization Failure\n" + "\t 0x08 : Interface Initialization Failure\n" + "\t 0x10 : Error Recovery Failure\n"); + ha->hw.mdump_active = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active, ha->hw.mdump_active, "Minidump retrieval is Active"); ha->hw.mdump_done = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "mdump_done", CTLFLAG_RW, &ha->hw.mdump_done, ha->hw.mdump_done, "Minidump has been done and available for retrieval"); ha->hw.mdump_capture_mask = 0xF; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "minidump_capture_mask", CTLFLAG_RW, &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask, "Minidump capture mask"); #ifdef QL_DBG ha->err_inject = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "err_inject", CTLFLAG_RW, &ha->err_inject, ha->err_inject, "Error to be 
injected\n" "\t\t\t 0: No Errors\n" "\t\t\t 1: rcv: rxb struct invalid\n" "\t\t\t 2: rcv: mp == NULL\n" "\t\t\t 3: lro: rxb struct invalid\n" "\t\t\t 4: lro: mp == NULL\n" "\t\t\t 5: rcv: num handles invalid\n" "\t\t\t 6: reg: indirect reg rd_wr failure\n" "\t\t\t 7: ocm: offchip memory rd_wr failure\n" "\t\t\t 8: mbx: mailbox command failure\n" "\t\t\t 9: heartbeat failure\n" "\t\t\t A: temperature failure\n" "\t\t\t 11: m_getcl or m_getjcl failure\n" ); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW, (void *)ha, 0, qla_sysctl_stop_pegs, "I", "Peg Stop"); #endif /* #ifdef QL_DBG */ ha->hw.user_pri_nic = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic, ha->hw.user_pri_nic, "VLAN Tag User Priority for Normal Ethernet Packets"); ha->hw.user_pri_iscsi = 4; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi, ha->hw.user_pri_iscsi, "VLAN Tag User Priority for iSCSI Packets"); qlnx_add_hw_stats_sysctls(ha); qlnx_add_drvr_stats_sysctls(ha); return; } void ql_hw_link_status(qla_host_t *ha) { device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui); if (ha->hw.link_up) { device_printf(ha->pci_dev, "link Up\n"); } else { device_printf(ha->pci_dev, "link Down\n"); } - if (ha->hw.flags.fduplex) { + if (ha->hw.fduplex) { device_printf(ha->pci_dev, "Full Duplex\n"); } else { device_printf(ha->pci_dev, "Half Duplex\n"); } - if (ha->hw.flags.autoneg) { + if (ha->hw.autoneg) { device_printf(ha->pci_dev, "Auto Negotiation Enabled\n"); } else { device_printf(ha->pci_dev, "Auto Negotiation Disabled\n"); } switch (ha->hw.link_speed) { case 0x710: device_printf(ha->pci_dev, "link speed\t\t 10Gps\n"); break; case 0x3E8: device_printf(ha->pci_dev, "link speed\t\t 1Gps\n"); break; case 
0x64: device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n"); break; default: device_printf(ha->pci_dev, "link speed\t\t Unknown\n"); break; } switch (ha->hw.module_type) { case 0x01: device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n"); break; case 0x02: device_printf(ha->pci_dev, "Module Type 10GBase-LR\n"); break; case 0x03: device_printf(ha->pci_dev, "Module Type 10GBase-SR\n"); break; case 0x04: device_printf(ha->pci_dev, "Module Type 10GE Passive Copper(Compliant)[%d m]\n", ha->hw.cable_length); break; case 0x05: device_printf(ha->pci_dev, "Module Type 10GE Active" " Limiting Copper(Compliant)[%d m]\n", ha->hw.cable_length); break; case 0x06: device_printf(ha->pci_dev, "Module Type 10GE Passive Copper" " (Legacy, Best Effort)[%d m]\n", ha->hw.cable_length); break; case 0x07: device_printf(ha->pci_dev, "Module Type 1000Base-SX\n"); break; case 0x08: device_printf(ha->pci_dev, "Module Type 1000Base-LX\n"); break; case 0x09: device_printf(ha->pci_dev, "Module Type 1000Base-CX\n"); break; case 0x0A: device_printf(ha->pci_dev, "Module Type 1000Base-T\n"); break; case 0x0B: device_printf(ha->pci_dev, "Module Type 1GE Passive Copper" "(Legacy, Best Effort)\n"); break; default: device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n", ha->hw.module_type); break; } if (ha->hw.link_faults == 1) device_printf(ha->pci_dev, "SFP Power Fault\n"); } /* * Name: ql_free_dma * Function: Frees the DMA'able memory allocated in ql_alloc_dma() */ void ql_free_dma(qla_host_t *ha) { uint32_t i; if (ha->hw.dma_buf.flags.sds_ring) { for (i = 0; i < ha->hw.num_sds_rings; i++) { ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]); } ha->hw.dma_buf.flags.sds_ring = 0; } if (ha->hw.dma_buf.flags.rds_ring) { for (i = 0; i < ha->hw.num_rds_rings; i++) { ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]); } ha->hw.dma_buf.flags.rds_ring = 0; } if (ha->hw.dma_buf.flags.tx_ring) { ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring); ha->hw.dma_buf.flags.tx_ring = 0; } ql_minidump_free(ha); } /* * Name: 
ql_alloc_dma * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. */ int ql_alloc_dma(qla_host_t *ha) { device_t dev; uint32_t i, j, size, tx_ring_size; qla_hw_t *hw; qla_hw_tx_cntxt_t *tx_cntxt; uint8_t *vaddr; bus_addr_t paddr; dev = ha->pci_dev; QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); hw = &ha->hw; /* * Allocate Transmit Ring */ tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS); size = (tx_ring_size * ha->hw.num_tx_rings); hw->dma_buf.tx_ring.alignment = 8; hw->dma_buf.tx_ring.size = size + PAGE_SIZE; if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) { device_printf(dev, "%s: tx ring alloc failed\n", __func__); goto ql_alloc_dma_exit; } vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b; paddr = hw->dma_buf.tx_ring.dma_addr; for (i = 0; i < ha->hw.num_tx_rings; i++) { tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr; tx_cntxt->tx_ring_paddr = paddr; vaddr += tx_ring_size; paddr += tx_ring_size; } for (i = 0; i < ha->hw.num_tx_rings; i++) { tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; tx_cntxt->tx_cons = (uint32_t *)vaddr; tx_cntxt->tx_cons_paddr = paddr; vaddr += sizeof (uint32_t); paddr += sizeof (uint32_t); } ha->hw.dma_buf.flags.tx_ring = 1; QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n", __func__, (void *)(hw->dma_buf.tx_ring.dma_addr), hw->dma_buf.tx_ring.dma_b)); /* * Allocate Receive Descriptor Rings */ for (i = 0; i < hw->num_rds_rings; i++) { hw->dma_buf.rds_ring[i].alignment = 8; hw->dma_buf.rds_ring[i].size = (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS; if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) { device_printf(dev, "%s: rds ring[%d] alloc failed\n", __func__, i); for (j = 0; j < i; j++) ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]); goto ql_alloc_dma_exit; } QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n", __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr), hw->dma_buf.rds_ring[i].dma_b)); } hw->dma_buf.flags.rds_ring = 1; /* * Allocate Status 
Descriptor Rings */ for (i = 0; i < hw->num_sds_rings; i++) { hw->dma_buf.sds_ring[i].alignment = 8; hw->dma_buf.sds_ring[i].size = (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS; if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) { device_printf(dev, "%s: sds ring alloc failed\n", __func__); for (j = 0; j < i; j++) ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]); goto ql_alloc_dma_exit; } QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n", __func__, i, (void *)(hw->dma_buf.sds_ring[i].dma_addr), hw->dma_buf.sds_ring[i].dma_b)); } for (i = 0; i < hw->num_sds_rings; i++) { hw->sds[i].sds_ring_base = (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b; } hw->dma_buf.flags.sds_ring = 1; return 0; ql_alloc_dma_exit: ql_free_dma(ha); return -1; } #define Q8_MBX_MSEC_DELAY 5000 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause) { uint32_t i; uint32_t data; int ret = 0; + uint64_t start_usecs; + uint64_t end_usecs; + uint64_t msecs_200; - if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) { + ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]); + + if (ha->offline || ha->qla_initiate_recovery) { + ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0); + goto exit_qla_mbx_cmd; + } + + if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) && + (((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))|| + !(ha->err_inject & ~0xFFFF))) { ret = -3; - ha->qla_initiate_recovery = 1; + QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } + start_usecs = qla_get_usec_timestamp(); + if (no_pause) i = 1000; else i = Q8_MBX_MSEC_DELAY; while (i) { + + if (ha->qla_initiate_recovery) { + ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); + return (-1); + } + data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL); if (data == 0) break; if (no_pause) { DELAY(1000); } else { qla_mdelay(__func__, 1); } i--; } if (i == 0) { device_printf(ha->pci_dev, "%s: host_mbx_cntrl 
0x%08x\n", __func__, data); + ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0); ret = -1; - ha->qla_initiate_recovery = 1; + ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++; + QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } for (i = 0; i < n_hmbox; i++) { WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox); h_mbox++; } WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1); i = Q8_MBX_MSEC_DELAY; while (i) { + + if (ha->qla_initiate_recovery) { + ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); + return (-1); + } + data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); if ((data & 0x3) == 1) { data = READ_REG32(ha, Q8_FW_MBOX0); if ((data & 0xF000) != 0x8000) break; } if (no_pause) { DELAY(1000); } else { qla_mdelay(__func__, 1); } i--; } if (i == 0) { device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n", __func__, data); + ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0); ret = -2; - ha->qla_initiate_recovery = 1; + ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++; + QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } for (i = 0; i < n_fwmbox; i++) { + + if (ha->qla_initiate_recovery) { + ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); + return (-1); + } + *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2))); } WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); + end_usecs = qla_get_usec_timestamp(); + + if (end_usecs > start_usecs) { + msecs_200 = (end_usecs - start_usecs)/(1000 * 200); + + if (msecs_200 < 15) + ha->hw.mbx_comp_msecs[msecs_200]++; + else if (msecs_200 < 20) + ha->hw.mbx_comp_msecs[15]++; + else { + device_printf(ha->pci_dev, "%s: [%ld, %ld] %ld\n", __func__, + start_usecs, end_usecs, msecs_200); + ha->hw.mbx_comp_msecs[16]++; + } + } + ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]); + + exit_qla_mbx_cmd: return (ret); } int qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb, uint32_t *num_rcvq) { uint32_t *mbox, err; device_t dev = ha->pci_dev; bzero(ha->hw.mbox, (sizeof (uint32_t) * 
Q8_NUM_MBOX)); mbox = ha->hw.mbox; mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } err = mbox[0] >> 25; if (supports_9kb != NULL) { if (mbox[16] & 0x80) /* bit 7 of mbox 16 */ *supports_9kb = 1; else *supports_9kb = 0; } if (num_rcvq != NULL) *num_rcvq = ((mbox[6] >> 16) & 0xFFFF); if ((err != 1) && (err != 0)) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, uint32_t create) { uint32_t i, err; device_t dev = ha->pci_dev; q80_config_intr_t *c_intr; q80_config_intr_rsp_t *c_intr_rsp; c_intr = (q80_config_intr_t *)ha->hw.mbox; bzero(c_intr, (sizeof (q80_config_intr_t))); c_intr->opcode = Q8_MBX_CONFIG_INTR; c_intr->count_version = (sizeof (q80_config_intr_t) >> 2); c_intr->count_version |= Q8_MBX_CMD_VERSION; c_intr->nentries = num_intrs; for (i = 0; i < num_intrs; i++) { if (create) { c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE; c_intr->intr[i].msix_index = start_idx + 1 + i; } else { c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE; c_intr->intr[i].msix_index = ha->hw.intr_id[(start_idx + i)]; } c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X; } if (qla_mbx_cmd(ha, (uint32_t *)c_intr, (sizeof (q80_config_intr_t) >> 2), ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) { - device_printf(dev, "%s: failed0\n", __func__); + device_printf(dev, "%s: %s failed0\n", __func__, + (create ? "create" : "delete")); return (-1); } c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status); if (err) { - device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err, - c_intr_rsp->nentries); + device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__, + (create ? 
"create" : "delete"), err, c_intr_rsp->nentries); for (i = 0; i < c_intr_rsp->nentries; i++) { device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n", __func__, i, c_intr_rsp->intr[i].status, c_intr_rsp->intr[i].intr_id, c_intr_rsp->intr[i].intr_src); } return (-1); } for (i = 0; ((i < num_intrs) && create); i++) { if (!c_intr_rsp->intr[i].status) { ha->hw.intr_id[(start_idx + i)] = c_intr_rsp->intr[i].intr_id; ha->hw.intr_src[(start_idx + i)] = c_intr_rsp->intr[i].intr_src; } } return (0); } /* * Name: qla_config_rss * Function: Configure RSS for the context/interface. */ static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id) { q80_config_rss_t *c_rss; q80_config_rss_rsp_t *c_rss_rsp; uint32_t err, i; device_t dev = ha->pci_dev; c_rss = (q80_config_rss_t *)ha->hw.mbox; bzero(c_rss, (sizeof (q80_config_rss_t))); c_rss->opcode = Q8_MBX_CONFIG_RSS; c_rss->count_version = (sizeof (q80_config_rss_t) >> 2); c_rss->count_version |= Q8_MBX_CMD_VERSION; c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP | Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP); //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP | // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP); c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS; c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE; c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK; c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID; c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS; c_rss->cntxt_id = cntxt_id; for (i = 0; i < 5; i++) { c_rss->rss_key[i] = rss_key[i]; } if (qla_mbx_cmd(ha, (uint32_t *)c_rss, (sizeof (q80_config_rss_t) >> 2), ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } 
return 0; } static int qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count, uint16_t cntxt_id, uint8_t *ind_table) { q80_config_rss_ind_table_t *c_rss_ind; q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp; uint32_t err; device_t dev = ha->pci_dev; if ((count > Q8_RSS_IND_TBL_SIZE) || ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) { device_printf(dev, "%s: illegal count [%d, %d]\n", __func__, start_idx, count); return (-1); } c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox; bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t)); c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE; c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2); c_rss_ind->count_version |= Q8_MBX_CMD_VERSION; c_rss_ind->start_idx = start_idx; c_rss_ind->end_idx = start_idx + count - 1; c_rss_ind->cntxt_id = cntxt_id; bcopy(ind_table, c_rss_ind->ind_table, count); if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind, (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox, (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } /* * Name: qla_config_intr_coalesce * Function: Configure Interrupt Coalescing. 
*/ static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, int rcv) { q80_config_intr_coalesc_t *intrc; q80_config_intr_coalesc_rsp_t *intrc_rsp; uint32_t err, i; device_t dev = ha->pci_dev; intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox; bzero(intrc, (sizeof (q80_config_intr_coalesc_t))); intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE; intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2); intrc->count_version |= Q8_MBX_CMD_VERSION; if (rcv) { intrc->flags = Q8_MBX_INTRC_FLAGS_RCV; intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF; intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF; } else { intrc->flags = Q8_MBX_INTRC_FLAGS_XMT; intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF; intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF; } intrc->cntxt_id = cntxt_id; if (tenable) { intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC; intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC; for (i = 0; i < ha->hw.num_sds_rings; i++) { intrc->sds_ring_mask |= (1 << i); } intrc->ms_timeout = 1000; } if (qla_mbx_cmd(ha, (uint32_t *)intrc, (sizeof (q80_config_intr_coalesc_t) >> 2), ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } /* * Name: qla_config_mac_addr * Function: binds a MAC address to the context/interface. * Can be unicast, multicast or broadcast. */ static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac, uint32_t num_mac) { q80_config_mac_addr_t *cmac; q80_config_mac_addr_rsp_t *cmac_rsp; uint32_t err; device_t dev = ha->pci_dev; int i; uint8_t *mac_cpy = mac_addr; if (num_mac > Q8_MAX_MAC_ADDRS) { device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n", __func__, (add_mac ? 
"Add" : "Del"), num_mac); return (-1); } cmac = (q80_config_mac_addr_t *)ha->hw.mbox; bzero(cmac, (sizeof (q80_config_mac_addr_t))); cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR; cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2; cmac->count_version |= Q8_MBX_CMD_VERSION; if (add_mac) cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR; else cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR; cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS; cmac->nmac_entries = num_mac; cmac->cntxt_id = ha->hw.rcv_cntxt_id; for (i = 0; i < num_mac; i++) { bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); mac_addr = mac_addr + ETHER_ADDR_LEN; } if (qla_mbx_cmd(ha, (uint32_t *)cmac, (sizeof (q80_config_mac_addr_t) >> 2), ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) { device_printf(dev, "%s: %s failed0\n", __func__, (add_mac ? "Add" : "Del")); return (-1); } cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status); if (err) { device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__, (add_mac ? "Add" : "Del"), err); for (i = 0; i < num_mac; i++) { device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2], mac_cpy[3], mac_cpy[4], mac_cpy[5]); mac_cpy += ETHER_ADDR_LEN; } return (-1); } return 0; } /* * Name: qla_set_mac_rcv_mode * Function: Enable/Disable AllMulticast and Promiscous Modes. 
*/ static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode) { q80_config_mac_rcv_mode_t *rcv_mode; uint32_t err; q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp; device_t dev = ha->pci_dev; rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox; bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t))); rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE; rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2; rcv_mode->count_version |= Q8_MBX_CMD_VERSION; rcv_mode->mode = mode; rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode, (sizeof (q80_config_mac_rcv_mode_t) >> 2), ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } int ql_set_promisc(qla_host_t *ha) { int ret; ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE; ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); return (ret); } void qla_reset_promisc(qla_host_t *ha) { ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE; (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); } int ql_set_allmulti(qla_host_t *ha) { int ret; ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE; ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); return (ret); } void qla_reset_allmulti(qla_host_t *ha) { ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE; (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); } /* * Name: ql_set_max_mtu * Function: * Sets the maximum transfer unit size for the specified rcv context. 
*/ int ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id) { device_t dev; q80_set_max_mtu_t *max_mtu; q80_set_max_mtu_rsp_t *max_mtu_rsp; uint32_t err; dev = ha->pci_dev; max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox; bzero(max_mtu, (sizeof (q80_set_max_mtu_t))); max_mtu->opcode = Q8_MBX_SET_MAX_MTU; max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2); max_mtu->count_version |= Q8_MBX_CMD_VERSION; max_mtu->cntxt_id = cntxt_id; max_mtu->mtu = mtu; if (qla_mbx_cmd(ha, (uint32_t *)max_mtu, (sizeof (q80_set_max_mtu_t) >> 2), ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) { device_printf(dev, "%s: failed\n", __func__); return -1; } max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id) { device_t dev; q80_link_event_t *lnk; q80_link_event_rsp_t *lnk_rsp; uint32_t err; dev = ha->pci_dev; lnk = (q80_link_event_t *)ha->hw.mbox; bzero(lnk, (sizeof (q80_link_event_t))); lnk->opcode = Q8_MBX_LINK_EVENT_REQ; lnk->count_version = (sizeof (q80_link_event_t) >> 2); lnk->count_version |= Q8_MBX_CMD_VERSION; lnk->cntxt_id = cntxt_id; lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC; if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2), ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id) { device_t dev; q80_config_fw_lro_t *fw_lro; q80_config_fw_lro_rsp_t *fw_lro_rsp; uint32_t err; dev = ha->pci_dev; fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox; bzero(fw_lro, sizeof(q80_config_fw_lro_t)); fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO; fw_lro->count_version 
= (sizeof (q80_config_fw_lro_t) >> 2); fw_lro->count_version |= Q8_MBX_CMD_VERSION; fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK; fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK; fw_lro->cntxt_id = cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)fw_lro, (sizeof (q80_config_fw_lro_t) >> 2), ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode) { device_t dev; q80_hw_config_t *hw_config; q80_hw_config_rsp_t *hw_config_rsp; uint32_t err; dev = ha->pci_dev; hw_config = (q80_hw_config_t *)ha->hw.mbox; bzero(hw_config, sizeof (q80_hw_config_t)); hw_config->opcode = Q8_MBX_HW_CONFIG; hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT; hw_config->count_version |= Q8_MBX_CMD_VERSION; hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE; hw_config->u.set_cam_search_mode.mode = search_mode; if (qla_mbx_cmd(ha, (uint32_t *)hw_config, (sizeof (q80_hw_config_t) >> 2), ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_get_cam_search_mode(qla_host_t *ha) { device_t dev; q80_hw_config_t *hw_config; q80_hw_config_rsp_t *hw_config_rsp; uint32_t err; dev = ha->pci_dev; hw_config = (q80_hw_config_t *)ha->hw.mbox; bzero(hw_config, sizeof (q80_hw_config_t)); hw_config->opcode = Q8_MBX_HW_CONFIG; hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT; hw_config->count_version |= Q8_MBX_CMD_VERSION; hw_config->cmd = 
Q8_HW_CONFIG_GET_CAM_SEARCH_MODE; if (qla_mbx_cmd(ha, (uint32_t *)hw_config, (sizeof (q80_hw_config_t) >> 2), ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } else { device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__, hw_config_rsp->u.get_cam_search_mode.mode); } return 0; } static int qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size) { device_t dev; q80_get_stats_t *stat; q80_get_stats_rsp_t *stat_rsp; uint32_t err; dev = ha->pci_dev; stat = (q80_get_stats_t *)ha->hw.mbox; bzero(stat, (sizeof (q80_get_stats_t))); stat->opcode = Q8_MBX_GET_STATS; stat->count_version = 2; stat->count_version |= Q8_MBX_CMD_VERSION; stat->cmd = cmd; if (qla_mbx_cmd(ha, (uint32_t *)stat, 2, ha->hw.mbox, (rsp_size >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status); if (err) { return -1; } return 0; } void ql_get_stats(qla_host_t *ha) { q80_get_stats_rsp_t *stat_rsp; q80_mac_stats_t *mstat; q80_xmt_stats_t *xstat; q80_rcv_stats_t *rstat; uint32_t cmd; int i; struct ifnet *ifp = ha->ifp; if (ifp == NULL) return; if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return; } if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { QLA_UNLOCK(ha, __func__); return; } stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; /* * Get MAC Statistics */ cmd = Q8_GET_STATS_CMD_TYPE_MAC; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= ((ha->pci_func & 0x1) << 16); - if (ha->qla_watchdog_pause) + if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || + ha->offline) goto ql_get_stats_exit; if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 
mstat = (q80_mac_stats_t *)&stat_rsp->u.mac; bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t)); } else { device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } /* * Get RCV Statistics */ cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= (ha->hw.rcv_cntxt_id << 16); - if (ha->qla_watchdog_pause) + if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || + ha->offline) goto ql_get_stats_exit; if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv; bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t)); } else { device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } - if (ha->qla_watchdog_pause) + if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || + ha->offline) goto ql_get_stats_exit; /* * Get XMT Statistics */ - for (i = 0 ; ((i < ha->hw.num_tx_rings) && (!ha->qla_watchdog_pause)); - i++) { + for (i = 0 ; (i < ha->hw.num_tx_rings); i++) { + if (ha->qla_watchdog_pause || + (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || + ha->offline) + goto ql_get_stats_exit; + cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16); if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) == 0) { xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt; bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t)); } else { device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } } ql_get_stats_exit: QLA_UNLOCK(ha, __func__); return; } /* * Name: qla_tx_tso * Function: Checks if the packet to be transmitted is a candidate for * Large TCP Segment Offload. If yes, the appropriate fields in the Tx * Ring Structure are plugged in. 
*/ static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) { struct ether_vlan_header *eh; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; struct tcphdr *th = NULL; uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off; uint16_t etype, opcode, offload = 1; device_t dev; dev = ha->pci_dev; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } hdrlen = 0; switch (etype) { case ETHERTYPE_IP: tcp_opt_off = ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr); if (mp->m_len < tcp_opt_off) { m_copydata(mp, 0, tcp_opt_off, hdr); ip = (struct ip *)(hdr + ehdrlen); } else { ip = (struct ip *)(mp->m_data + ehdrlen); } ip_hlen = ip->ip_hl << 2; opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; if ((ip->ip_p != IPPROTO_TCP) || (ip_hlen != sizeof (struct ip))){ /* IP Options are not supported */ offload = 0; } else th = (struct tcphdr *)((caddr_t)ip + ip_hlen); break; case ETHERTYPE_IPV6: tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) + sizeof (struct tcphdr); if (mp->m_len < tcp_opt_off) { m_copydata(mp, 0, tcp_opt_off, hdr); ip6 = (struct ip6_hdr *)(hdr + ehdrlen); } else { ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); } ip_hlen = sizeof(struct ip6_hdr); opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6; if (ip6->ip6_nxt != IPPROTO_TCP) { //device_printf(dev, "%s: ipv6\n", __func__); offload = 0; } else th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); break; default: QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__)); offload = 0; break; } if (!offload) return (-1); tcp_hlen = th->th_off << 2; hdrlen = ehdrlen + ip_hlen + tcp_hlen; if (mp->m_len < hdrlen) { if (mp->m_len < tcp_opt_off) { if (tcp_hlen > sizeof(struct tcphdr)) { m_copydata(mp, tcp_opt_off, (tcp_hlen - sizeof(struct tcphdr)), &hdr[tcp_opt_off]); } } else { m_copydata(mp, 0, hdrlen, hdr); } } tx_cmd->mss = 
mp->m_pkthdr.tso_segsz; tx_cmd->flags_opcode = opcode ; tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; tx_cmd->total_hdr_len = hdrlen; /* Check for Multicast least significant bit of MSB == 1 */ if (eh->evl_dhost[0] & 0x01) { tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST; } if (mp->m_len < hdrlen) { printf("%d\n", hdrlen); return (1); } return (0); } /* * Name: qla_tx_chksum * Function: Checks if the packet to be transmitted is a candidate for * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx * Ring Structure are plugged in. */ static int qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code, uint32_t *tcp_hdr_off) { struct ether_vlan_header *eh; struct ip *ip; struct ip6_hdr *ip6; uint32_t ehdrlen, ip_hlen; uint16_t etype, opcode, offload = 1; device_t dev; uint8_t buf[sizeof(struct ip6_hdr)]; dev = ha->pci_dev; *op_code = 0; if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0) return (-1); eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = sizeof (struct ip); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof(struct ip), buf); ip = (struct ip *)buf; } if (ip->ip_p == IPPROTO_TCP) opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM; else if (ip->ip_p == IPPROTO_UDP) opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM; else { //device_printf(dev, "%s: ipv4\n", __func__); offload = 0; } break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), buf); ip6 = (struct ip6_hdr *)buf; } if (ip6->ip6_nxt == IPPROTO_TCP) opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6; else if (ip6->ip6_nxt == IPPROTO_UDP) opcode = 
Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6; else { //device_printf(dev, "%s: ipv6\n", __func__); offload = 0; } break; default: offload = 0; break; } if (!offload) return (-1); *op_code = opcode; *tcp_hdr_off = (ip_hlen + ehdrlen); return (0); } #define QLA_TX_MIN_FREE 2 /* * Name: ql_hw_send * Function: Transmits a packet. It first checks if the packet is a * candidate for Large TCP Segment Offload and then for UDP/TCP checksum * offload. If either of these creteria are not met, it is transmitted * as a regular ethernet frame. */ int ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu) { struct ether_vlan_header *eh; qla_hw_t *hw = &ha->hw; q80_tx_cmd_t *tx_cmd, tso_cmd; bus_dma_segment_t *c_seg; uint32_t num_tx_cmds, hdr_len = 0; uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next; device_t dev; int i, ret; uint8_t *src = NULL, *dst = NULL; uint8_t frame_hdr[QL_FRAME_HDR_SIZE]; uint32_t op_code = 0; uint32_t tcp_hdr_off = 0; dev = ha->pci_dev; /* * Always make sure there is atleast one empty slot in the tx_ring * tx_ring is considered full when there only one entry available */ num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; total_length = mp->m_pkthdr.len; if (total_length > QLA_MAX_TSO_FRAME_SIZE) { device_printf(dev, "%s: total length exceeds maxlen(%d)\n", __func__, total_length); return (EINVAL); } eh = mtod(mp, struct ether_vlan_header *); if (mp->m_pkthdr.csum_flags & CSUM_TSO) { bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t)); src = frame_hdr; ret = qla_tx_tso(ha, mp, &tso_cmd, src); if (!(ret & ~1)) { /* find the additional tx_cmd descriptors required */ if (mp->m_flags & M_VLANTAG) tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN; hdr_len = tso_cmd.total_hdr_len; bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; bytes = QL_MIN(bytes, hdr_len); num_tx_cmds++; hdr_len -= bytes; while (hdr_len) { bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); hdr_len -= bytes; 
num_tx_cmds++; } hdr_len = tso_cmd.total_hdr_len; if (ret == 0) src = (uint8_t *)eh; } else return (EINVAL); } else { (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off); } if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { ql_hw_tx_done_locked(ha, txr_idx); if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= " "(num_tx_cmds + QLA_TX_MIN_FREE))\n", __func__)); return (-1); } } for (i = 0; i < num_tx_cmds; i++) { int j; j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1); if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) { QL_ASSERT(ha, 0, \ ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\ __func__, __LINE__, txr_idx, j,\ ha->tx_ring[txr_idx].tx_buf[j].m_head)); return (EINVAL); } } tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx]; if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) { if (nsegs > ha->hw.max_tx_segs) ha->hw.max_tx_segs = nsegs; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); if (op_code) { tx_cmd->flags_opcode = op_code; tx_cmd->tcp_hdr_off = tcp_hdr_off; } else { tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER; } } else { bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t)); ha->tx_tso_frames++; } if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED; if (iscsi_pdu) eh->evl_tag |= ha->hw.user_pri_iscsi << 13; } else if (mp->m_flags & M_VLANTAG) { if (hdr_len) { /* TSO */ tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED | Q8_TX_CMD_FLAGS_HW_VLAN_ID); tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN; } else tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID; ha->hw_vlan_tx_frames++; tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag; if (iscsi_pdu) { tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13; mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci; } } tx_cmd->n_bufs = (uint8_t)nsegs; tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF); tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8))); tx_cmd->cntxtid = 
Q8_TX_CMD_PORT_CNXTID(ha->pci_func); c_seg = segs; while (1) { for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) { switch (i) { case 0: tx_cmd->buf1_addr = c_seg->ds_addr; tx_cmd->buf1_len = c_seg->ds_len; break; case 1: tx_cmd->buf2_addr = c_seg->ds_addr; tx_cmd->buf2_len = c_seg->ds_len; break; case 2: tx_cmd->buf3_addr = c_seg->ds_addr; tx_cmd->buf3_len = c_seg->ds_len; break; case 3: tx_cmd->buf4_addr = c_seg->ds_addr; tx_cmd->buf4_len = c_seg->ds_len; break; } c_seg++; nsegs--; } txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; if (!nsegs) break; tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); } if (mp->m_pkthdr.csum_flags & CSUM_TSO) { /* TSO : Copy the header in the following tx cmd descriptors */ txr_next = hw->tx_cntxt[txr_idx].txr_next; tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; bytes = QL_MIN(bytes, hdr_len); dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN; if (mp->m_flags & M_VLANTAG) { /* first copy the src/dst MAC addresses */ bcopy(src, dst, (ETHER_ADDR_LEN * 2)); dst += (ETHER_ADDR_LEN * 2); src += (ETHER_ADDR_LEN * 2); *((uint16_t *)dst) = htons(ETHERTYPE_VLAN); dst += 2; *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag); dst += 2; /* bytes left in src header */ hdr_len -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); /* bytes left in TxCmd Entry */ bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); bcopy(src, dst, bytes); src += bytes; hdr_len -= bytes; } else { bcopy(src, dst, bytes); src += bytes; hdr_len -= bytes; } txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; while (hdr_len) { tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bytes = QL_MIN((sizeof(q80_tx_cmd_t)), 
hdr_len); bcopy(src, tx_cmd, bytes); src += bytes; hdr_len -= bytes; txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; } } hw->tx_cntxt[txr_idx].txr_free = hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count; QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\ txr_idx); QL_DPRINT8(ha, (dev, "%s: return\n", __func__)); return (0); } #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */ static int qla_config_rss_ind_table(qla_host_t *ha) { uint32_t i, count; uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE]; for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) { rss_ind_tbl[i] = i % ha->hw.num_sds_rings; } for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; i = i + Q8_CONFIG_IND_TBL_SIZE) { if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) { count = Q8_RSS_IND_TBL_MAX_IDX - i + 1; } else { count = Q8_CONFIG_IND_TBL_SIZE; } if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id, rss_ind_tbl)) return (-1); } return (0); } static int qla_config_soft_lro(qla_host_t *ha) { int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; bzero(lro, sizeof(struct lro_ctrl)); #if (__FreeBSD_version >= 1100101) if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) { device_printf(ha->pci_dev, "%s: tcp_lro_init_args [%d] failed\n", __func__, i); return (-1); } #else if (tcp_lro_init(lro)) { device_printf(ha->pci_dev, "%s: tcp_lro_init [%d] failed\n", __func__, i); return (-1); } #endif /* #if (__FreeBSD_version >= 1100101) */ lro->ifp = ha->ifp; } QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__)); return (0); } static void qla_drain_soft_lro(qla_host_t *ha) { int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; #if (__FreeBSD_version >= 1100101) tcp_lro_flush_all(lro); #else struct lro_entry *queued; while ((!SLIST_EMPTY(&lro->lro_active))) { 
queued = SLIST_FIRST(&lro->lro_active); SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif /* #if (__FreeBSD_version >= 1100101) */ } return; } static void qla_free_soft_lro(qla_host_t *ha) { int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; tcp_lro_free(lro); } return; } /* * Name: ql_del_hw_if * Function: Destroys the hardware specific entities corresponding to an * Ethernet Interface */ void ql_del_hw_if(qla_host_t *ha) { uint32_t i; uint32_t num_msix; (void)qla_stop_nic_func(ha); qla_del_rcv_cntxt(ha); - qla_del_xmt_cntxt(ha); + if(qla_del_xmt_cntxt(ha)) + goto ql_del_hw_if_exit; if (ha->hw.flags.init_intr_cnxt) { for (i = 0; i < ha->hw.num_sds_rings; ) { if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) num_msix = Q8_MAX_INTR_VECTORS; else num_msix = ha->hw.num_sds_rings - i; - qla_config_intr_cntxt(ha, i, num_msix, 0); + if (qla_config_intr_cntxt(ha, i, num_msix, 0)) + break; + i += num_msix; } ha->hw.flags.init_intr_cnxt = 0; } +ql_del_hw_if_exit: if (ha->hw.enable_soft_lro) { qla_drain_soft_lro(ha); qla_free_soft_lro(ha); } return; } void qla_confirm_9kb_enable(qla_host_t *ha) { uint32_t supports_9kb = 0; ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX); /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */ WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2); WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); qla_get_nic_partition(ha, &supports_9kb, NULL); if (!supports_9kb) ha->hw.enable_9kb = 0; return; } /* * Name: ql_init_hw_if * Function: Creates the hardware specific entities corresponding to an * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address * corresponding to the interface. Enables LRO if allowed. 
*/ int ql_init_hw_if(qla_host_t *ha) { device_t dev; uint32_t i; uint8_t bcast_mac[6]; qla_rdesc_t *rdesc; uint32_t num_msix; dev = ha->pci_dev; for (i = 0; i < ha->hw.num_sds_rings; i++) { bzero(ha->hw.dma_buf.sds_ring[i].dma_b, ha->hw.dma_buf.sds_ring[i].size); } for (i = 0; i < ha->hw.num_sds_rings; ) { if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) num_msix = Q8_MAX_INTR_VECTORS; else num_msix = ha->hw.num_sds_rings - i; if (qla_config_intr_cntxt(ha, i, num_msix, 1)) { if (i > 0) { num_msix = i; for (i = 0; i < num_msix; ) { qla_config_intr_cntxt(ha, i, Q8_MAX_INTR_VECTORS, 0); i += Q8_MAX_INTR_VECTORS; } } return (-1); } i = i + num_msix; } ha->hw.flags.init_intr_cnxt = 1; /* * Create Receive Context */ if (qla_init_rcv_cntxt(ha)) { return (-1); } for (i = 0; i < ha->hw.num_rds_rings; i++) { rdesc = &ha->hw.rds[i]; rdesc->rx_next = NUM_RX_DESCRIPTORS - 2; rdesc->rx_in = 0; /* Update the RDS Producer Indices */ QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\ rdesc->rx_next); } /* * Create Transmit Context */ if (qla_init_xmt_cntxt(ha)) { qla_del_rcv_cntxt(ha); return (-1); } ha->hw.max_tx_segs = 0; if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1)) return(-1); ha->hw.flags.unicast_mac = 1; bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; if (qla_config_mac_addr(ha, bcast_mac, 1, 1)) return (-1); ha->hw.flags.bcast_mac = 1; /* * program any cached multicast addresses */ if (qla_hw_add_all_mcast(ha)) return (-1); if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id)) return (-1); if (qla_config_rss(ha, ha->hw.rcv_cntxt_id)) return (-1); if (qla_config_rss_ind_table(ha)) return (-1); if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1)) return (-1); if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id)) return (-1); if (ha->ifp->if_capenable & IFCAP_LRO) { if (ha->hw.enable_hw_lro) { ha->hw.enable_soft_lro = 0; if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id)) return 
(-1); } else { ha->hw.enable_soft_lro = 1; if (qla_config_soft_lro(ha)) return (-1); } } if (qla_init_nic_func(ha)) return (-1); if (qla_query_fw_dcbx_caps(ha)) return (-1); for (i = 0; i < ha->hw.num_sds_rings; i++) QL_ENABLE_INTERRUPTS(ha, i); return (0); } static int qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx) { device_t dev = ha->pci_dev; q80_rq_map_sds_to_rds_t *map_rings; q80_rsp_map_sds_to_rds_t *map_rings_rsp; uint32_t i, err; qla_hw_t *hw = &ha->hw; map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox; bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t)); map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS; map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2); map_rings->count_version |= Q8_MBX_CMD_VERSION; map_rings->cntxt_id = hw->rcv_cntxt_id; map_rings->num_rings = num_idx; for (i = 0; i < num_idx; i++) { map_rings->sds_rds[i].sds_ring = i + start_idx; map_rings->sds_rds[i].rds_ring = i + start_idx; } if (qla_mbx_cmd(ha, (uint32_t *)map_rings, (sizeof (q80_rq_map_sds_to_rds_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return (0); } /* * Name: qla_init_rcv_cntxt * Function: Creates the Receive Context. 
*/ static int qla_init_rcv_cntxt(qla_host_t *ha) { q80_rq_rcv_cntxt_t *rcntxt; q80_rsp_rcv_cntxt_t *rcntxt_rsp; q80_stat_desc_t *sdesc; int i, j; qla_hw_t *hw = &ha->hw; device_t dev; uint32_t err; uint32_t rcntxt_sds_rings; uint32_t rcntxt_rds_rings; uint32_t max_idx; dev = ha->pci_dev; /* * Create Receive Context */ for (i = 0; i < hw->num_sds_rings; i++) { sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { sdesc->data[0] = 1ULL; sdesc->data[1] = 1ULL; } } rcntxt_sds_rings = hw->num_sds_rings; if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS; rcntxt_rds_rings = hw->num_rds_rings; if (hw->num_rds_rings > MAX_RDS_RING_SETS) rcntxt_rds_rings = MAX_RDS_RING_SETS; rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox; bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t))); rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT; rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2); rcntxt->count_version |= Q8_MBX_CMD_VERSION; rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW | Q8_RCV_CNTXT_CAP0_LRO | Q8_RCV_CNTXT_CAP0_HW_LRO | Q8_RCV_CNTXT_CAP0_RSS | Q8_RCV_CNTXT_CAP0_SGL_LRO; if (ha->hw.enable_9kb) rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO; else rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO; if (ha->hw.num_rds_rings > 1) { rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5); rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS; } else rcntxt->nrds_sets_rings = 0x1 | (1 << 5); rcntxt->nsds_rings = rcntxt_sds_rings; rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE; rcntxt->rcv_vpid = 0; for (i = 0; i < rcntxt_sds_rings; i++) { rcntxt->sds[i].paddr = qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); rcntxt->sds[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]); rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); } for (i = 0; i < rcntxt_rds_rings; i++) { rcntxt->rds[i].paddr_std = qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); if 
(ha->hw.enable_9kb) rcntxt->rds[i].std_bsize = qla_host_to_le64(MJUM9BYTES); else rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); rcntxt->rds[i].std_nentries = qla_host_to_le32(NUM_RX_DESCRIPTORS); } if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, (sizeof (q80_rq_rcv_cntxt_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } for (i = 0; i < rcntxt_sds_rings; i++) { hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i]; } for (i = 0; i < rcntxt_rds_rings; i++) { hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std; } hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id; ha->hw.flags.init_rx_cnxt = 1; if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) { for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) { if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings) max_idx = MAX_RCNTXT_SDS_RINGS; else max_idx = hw->num_sds_rings - i; err = qla_add_rcv_rings(ha, i, max_idx); if (err) return -1; i += max_idx; } } if (hw->num_rds_rings > 1) { for (i = 0; i < hw->num_rds_rings; ) { if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings) max_idx = MAX_SDS_TO_RDS_MAP; else max_idx = hw->num_rds_rings - i; err = qla_map_sds_to_rds(ha, i, max_idx); if (err) return -1; i += max_idx; } } return (0); } static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds) { device_t dev = ha->pci_dev; q80_rq_add_rcv_rings_t *add_rcv; q80_rsp_add_rcv_rings_t *add_rcv_rsp; uint32_t i,j, err; qla_hw_t *hw = &ha->hw; add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox; bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t)); add_rcv->opcode = Q8_MBX_ADD_RX_RINGS; add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2); add_rcv->count_version |= Q8_MBX_CMD_VERSION; add_rcv->nrds_sets_rings = nsds | (1 << 5); add_rcv->nsds_rings = nsds; add_rcv->cntxt_id = 
hw->rcv_cntxt_id; for (i = 0; i < nsds; i++) { j = i + sds_idx; add_rcv->sds[i].paddr = qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr); add_rcv->sds[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]); add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); } for (i = 0; (i < nsds); i++) { j = i + sds_idx; add_rcv->rds[i].paddr_std = qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr); if (ha->hw.enable_9kb) add_rcv->rds[i].std_bsize = qla_host_to_le64(MJUM9BYTES); else add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); add_rcv->rds[i].std_nentries = qla_host_to_le32(NUM_RX_DESCRIPTORS); } if (qla_mbx_cmd(ha, (uint32_t *)add_rcv, (sizeof (q80_rq_add_rcv_rings_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } for (i = 0; i < nsds; i++) { hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i]; } for (i = 0; i < nsds; i++) { hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std; } return (0); } /* * Name: qla_del_rcv_cntxt * Function: Destroys the Receive Context. 
*/ static void qla_del_rcv_cntxt(qla_host_t *ha) { device_t dev = ha->pci_dev; q80_rcv_cntxt_destroy_t *rcntxt; q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp; uint32_t err; uint8_t bcast_mac[6]; if (!ha->hw.flags.init_rx_cnxt) return; if (qla_hw_del_all_mcast(ha)) return; if (ha->hw.flags.bcast_mac) { bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; if (qla_config_mac_addr(ha, bcast_mac, 0, 1)) return; ha->hw.flags.bcast_mac = 0; } if (ha->hw.flags.unicast_mac) { if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1)) return; ha->hw.flags.unicast_mac = 0; } rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox; bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t))); rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT; rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2); rcntxt->count_version |= Q8_MBX_CMD_VERSION; rcntxt->cntxt_id = ha->hw.rcv_cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, (sizeof (q80_rcv_cntxt_destroy_t) >> 2), ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return; } rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); } ha->hw.flags.init_rx_cnxt = 0; return; } /* * Name: qla_init_xmt_cntxt * Function: Creates the Transmit Context. 
*/
/*
 * Creates one Transmit Context (ring txr_idx) via the CREATE_TX_CNTXT
 * mailbox command, records the producer-register index and context id
 * returned by the firmware, and enables interrupt coalescing for it.
 * Returns 0 on success, -1 on any mailbox or firmware-status failure.
 */
static int
qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t dev;
	qla_hw_t *hw = &ha->hw;
	q80_rq_tx_cntxt_t *tcntxt;
	q80_rsp_tx_cntxt_t *tcntxt_rsp;
	uint32_t err;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;
	uint32_t intr_idx;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	dev = ha->pci_dev;

	/*
	 * Create Transmit Context: build the request in the shared mailbox
	 * buffer.  count_version packs the request length (in 32-bit words)
	 * with the mailbox command version.
	 */
	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));

	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	intr_idx = txr_idx;

#ifdef QL_ENABLE_ISCSI_TLV
	/*
	 * With iSCSI TLV support the upper half of the TX rings carries
	 * traffic class 1, and the rings of both halves share the interrupt
	 * vectors of the lower half (hence the modulo on intr_idx).
	 */
	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
		Q8_TX_CNTXT_CAP0_TC;

	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
		tcntxt->traffic_class = 1;
	}

	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
#else
	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	/* One TX ring per context; describe its DMA layout to the firmware. */
	tcntxt->ntx_rings = 1;

	tcntxt->tx_ring[0].paddr =
		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
	tcntxt->tx_ring[0].tx_consumer =
		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);

	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);

	/* Reset the driver-side ring bookkeeping before the ring goes live. */
	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
	*hw_tx_cntxt->tx_cons = 0;

	if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_rq_tx_cntxt_t) >> 2),
		ha->hw.mbox,
		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	/* Check the firmware completion status embedded in the response. */
	tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return -1;
	}

	/* Firmware hands back the producer-register index and context id. */
	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;

	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
		return (-1);

	return (0);
}
/* * Name: qla_del_xmt_cntxt * Function: Destroys the Transmit Context. */ static int qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) { device_t dev = ha->pci_dev; q80_tx_cntxt_destroy_t *tcntxt; q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp; uint32_t err; tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox; bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t))); tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT; tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2); tcntxt->count_version |= Q8_MBX_CMD_VERSION; tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, (sizeof (q80_tx_cntxt_destroy_t) >> 2), ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return (0); } -static void +static int qla_del_xmt_cntxt(qla_host_t *ha) { uint32_t i; + int ret = 0; if (!ha->hw.flags.init_tx_cnxt) - return; + return (ret); for (i = 0; i < ha->hw.num_tx_rings; i++) { - if (qla_del_xmt_cntxt_i(ha, i)) + if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0) break; } ha->hw.flags.init_tx_cnxt = 0; + + return (ret); } static int qla_init_xmt_cntxt(qla_host_t *ha) { uint32_t i, j; for (i = 0; i < ha->hw.num_tx_rings; i++) { if (qla_init_xmt_cntxt_i(ha, i) != 0) { - for (j = 0; j < i; j++) - qla_del_xmt_cntxt_i(ha, j); + for (j = 0; j < i; j++) { + if (qla_del_xmt_cntxt_i(ha, j)) + break; + } return (-1); } } ha->hw.flags.init_tx_cnxt = 1; return (0); } static int qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast) { int i, nmcast; uint32_t count = 0; uint8_t *mcast; nmcast = ha->hw.nmcast; QL_DPRINT2(ha, (ha->pci_dev, "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast)); mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); for (i = 0 ; ((i < 
Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { if ((ha->hw.mcast[i].addr[0] != 0) || (ha->hw.mcast[i].addr[1] != 0) || (ha->hw.mcast[i].addr[2] != 0) || (ha->hw.mcast[i].addr[3] != 0) || (ha->hw.mcast[i].addr[4] != 0) || (ha->hw.mcast[i].addr[5] != 0)) { bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; if (count == Q8_MAX_MAC_ADDRS) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } count = 0; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); } nmcast--; } } if (count) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } } QL_DPRINT2(ha, (ha->pci_dev, "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast)); return 0; } static int qla_hw_add_all_mcast(qla_host_t *ha) { int ret; ret = qla_hw_all_mcast(ha, 1); return (ret); } int qla_hw_del_all_mcast(qla_host_t *ha) { int ret; ret = qla_hw_all_mcast(ha, 0); bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS)); ha->hw.nmcast = 0; return (ret); } static int qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) return (0); /* its been already added */ } return (-1); } static int qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if ((ha->hw.mcast[i].addr[0] == 0) && (ha->hw.mcast[i].addr[1] == 0) && (ha->hw.mcast[i].addr[2] == 0) && (ha->hw.mcast[i].addr[3] == 0) && (ha->hw.mcast[i].addr[4] == 0) && (ha->hw.mcast[i].addr[5] == 0)) { bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN); ha->hw.nmcast++; mta = mta + ETHER_ADDR_LEN; nmcast--; if (nmcast == 0) break; } } return 0; } static int qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) { int i; for (i 
= 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) { ha->hw.mcast[i].addr[0] = 0; ha->hw.mcast[i].addr[1] = 0; ha->hw.mcast[i].addr[2] = 0; ha->hw.mcast[i].addr[3] = 0; ha->hw.mcast[i].addr[4] = 0; ha->hw.mcast[i].addr[5] = 0; ha->hw.nmcast--; mta = mta + ETHER_ADDR_LEN; nmcast--; if (nmcast == 0) break; } } return 0; } /* * Name: ql_hw_set_multi * Function: Sets the Multicast Addresses provided by the host O.S into the * hardware (for the given interface) */ int ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt, uint32_t add_mac) { uint8_t *mta = mcast_addr; int i; int ret = 0; uint32_t count = 0; uint8_t *mcast; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); for (i = 0; i < mcnt; i++) { if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) { if (add_mac) { if (qla_hw_mac_addr_present(ha, mta) != 0) { bcopy(mta, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; } } else { if (qla_hw_mac_addr_present(ha, mta) == 0) { bcopy(mta, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; } } } if (count == Q8_MAX_MAC_ADDRS) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } if (add_mac) { qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); } else { qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); } count = 0; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); } mta += Q8_MAC_ADDR_LEN; } if (count) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } if (add_mac) { qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); } else { qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); } } return (ret); } /* * Name: ql_hw_tx_done_locked * Function: Handle Transmit Completions */ void ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) { qla_tx_buf_t *txb; 
qla_hw_t *hw = &ha->hw; uint32_t comp_idx, comp_count = 0; qla_hw_tx_cntxt_t *hw_tx_cntxt; hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; /* retrieve index of last entry in tx ring completed */ comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons)); while (comp_idx != hw_tx_cntxt->txr_comp) { txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp]; hw_tx_cntxt->txr_comp++; if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS) hw_tx_cntxt->txr_comp = 0; comp_count++; if (txb->m_head) { ha->ifp->if_opackets++; bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; } } hw_tx_cntxt->txr_free += comp_count; return; } void ql_update_link_state(qla_host_t *ha) { - uint32_t link_state; + uint32_t link_state = 0; uint32_t prev_link_state; - if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) { - ha->hw.link_up = 0; - return; - } - link_state = READ_REG32(ha, Q8_LINK_STATE); - prev_link_state = ha->hw.link_up; - if (ha->pci_func == 0) - ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0); - else - ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0); + if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) { + link_state = READ_REG32(ha, Q8_LINK_STATE); + if (ha->pci_func == 0) { + link_state = (((link_state & 0xF) == 1)? 1 : 0); + } else { + link_state = ((((link_state >> 4)& 0xF) == 1)? 
1 : 0); + } + } + + atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state); + if (prev_link_state != ha->hw.link_up) { if (ha->hw.link_up) { if_link_state_change(ha->ifp, LINK_STATE_UP); } else { if_link_state_change(ha->ifp, LINK_STATE_DOWN); } } return; } int ql_hw_check_health(qla_host_t *ha) { uint32_t val; ha->hw.health_count++; if (ha->hw.health_count < 500) return 0; ha->hw.health_count = 0; val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) || (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) { - device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n", - __func__, val); + device_printf(ha->pci_dev, "%s: Temperature Alert" + " at ts_usecs %ld ts_reg = 0x%08x\n", + __func__, qla_get_usec_timestamp(), val); + + if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE) + ha->hw.sp_log_stop = -1; + + QL_INITIATE_RECOVERY(ha); return -1; } val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT); if ((val != ha->hw.hbeat_value) && (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) { ha->hw.hbeat_value = val; ha->hw.hbeat_failure = 0; return 0; } ha->hw.hbeat_failure++; if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1)) device_printf(ha->pci_dev, "%s: Heartbeat Failue 1[0x%08x]\n", __func__, val); if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */ return 0; - else - device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n", - __func__, val); + else { + uint32_t peg_halt_status1; + uint32_t peg_halt_status2; + peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1); + peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2); + + device_printf(ha->pci_dev, + "%s: Heartbeat Failue at ts_usecs = %ld " + "fw_heart_beat = 0x%08x " + "peg_halt_status1 = 0x%08x " + "peg_halt_status2 = 0x%08x\n", + __func__, qla_get_usec_timestamp(), val, + peg_halt_status1, peg_halt_status2); + + if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE) + ha->hw.sp_log_stop = -1; + } + QL_INITIATE_RECOVERY(ha); + return -1; } 
static int qla_init_nic_func(qla_host_t *ha) { device_t dev; q80_init_nic_func_t *init_nic; q80_init_nic_func_rsp_t *init_nic_rsp; uint32_t err; dev = ha->pci_dev; init_nic = (q80_init_nic_func_t *)ha->hw.mbox; bzero(init_nic, sizeof(q80_init_nic_func_t)); init_nic->opcode = Q8_MBX_INIT_NIC_FUNC; init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2); init_nic->count_version |= Q8_MBX_CMD_VERSION; init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN; init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN; init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN; //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t)); if (qla_mbx_cmd(ha, (uint32_t *)init_nic, (sizeof (q80_init_nic_func_t) >> 2), ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox; // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t)); err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_stop_nic_func(qla_host_t *ha) { device_t dev; q80_stop_nic_func_t *stop_nic; q80_stop_nic_func_rsp_t *stop_nic_rsp; uint32_t err; dev = ha->pci_dev; stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox; bzero(stop_nic, sizeof(q80_stop_nic_func_t)); stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC; stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2); stop_nic->count_version |= Q8_MBX_CMD_VERSION; stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN; stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN; //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t)); if (qla_mbx_cmd(ha, (uint32_t *)stop_nic, (sizeof (q80_stop_nic_func_t) >> 2), ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox; //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof 
(q80_stop_nic_func_rsp_ t)); err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_query_fw_dcbx_caps(qla_host_t *ha) { device_t dev; q80_query_fw_dcbx_caps_t *fw_dcbx; q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp; uint32_t err; dev = ha->pci_dev; fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox; bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t)); fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS; fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2); fw_dcbx->count_version |= Q8_MBX_CMD_VERSION; ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t)); if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx, (sizeof (q80_query_fw_dcbx_caps_t) >> 2), ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox; ql_dump_buf8(ha, __func__, fw_dcbx_rsp, sizeof (q80_query_fw_dcbx_caps_rsp_t)); err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2, uint32_t aen_mb3, uint32_t aen_mb4) { device_t dev; q80_idc_ack_t *idc_ack; q80_idc_ack_rsp_t *idc_ack_rsp; uint32_t err; int count = 300; dev = ha->pci_dev; idc_ack = (q80_idc_ack_t *)ha->hw.mbox; bzero(idc_ack, sizeof(q80_idc_ack_t)); idc_ack->opcode = Q8_MBX_IDC_ACK; idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2); idc_ack->count_version |= Q8_MBX_CMD_VERSION; idc_ack->aen_mb1 = aen_mb1; idc_ack->aen_mb2 = aen_mb2; idc_ack->aen_mb3 = aen_mb3; idc_ack->aen_mb4 = aen_mb4; ha->hw.imd_compl= 0; if (qla_mbx_cmd(ha, (uint32_t *)idc_ack, (sizeof (q80_idc_ack_t) >> 2), ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox; err = 
Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } while (count && !ha->hw.imd_compl) { qla_mdelay(__func__, 100); count--; } if (!count) return -1; else device_printf(dev, "%s: count %d\n", __func__, count); return (0); } static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits) { device_t dev; q80_set_port_cfg_t *pcfg; q80_set_port_cfg_rsp_t *pfg_rsp; uint32_t err; int count = 300; dev = ha->pci_dev; pcfg = (q80_set_port_cfg_t *)ha->hw.mbox; bzero(pcfg, sizeof(q80_set_port_cfg_t)); pcfg->opcode = Q8_MBX_SET_PORT_CONFIG; pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2); pcfg->count_version |= Q8_MBX_CMD_VERSION; pcfg->cfg_bits = cfg_bits; device_printf(dev, "%s: cfg_bits" " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" " [0x%x, 0x%x, 0x%x]\n", __func__, ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0)); ha->hw.imd_compl= 0; if (qla_mbx_cmd(ha, (uint32_t *)pcfg, (sizeof (q80_set_port_cfg_t) >> 2), ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status); if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) { while (count && !ha->hw.imd_compl) { qla_mdelay(__func__, 100); count--; } if (count) { device_printf(dev, "%s: count %d\n", __func__, count); err = 0; } } if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } return (0); } static int qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size) { uint32_t err; device_t dev = ha->pci_dev; q80_config_md_templ_size_t *md_size; q80_config_md_templ_size_rsp_t *md_size_rsp; #ifndef QL_LDFLASH_FW ql_minidump_template_hdr_t *hdr; hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump; *size = hdr->size_of_template; return (0); #endif /* #ifdef QL_LDFLASH_FW */ md_size = (q80_config_md_templ_size_t *) ha->hw.mbox; bzero(md_size, sizeof(q80_config_md_templ_size_t)); md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE; md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2); md_size->count_version |= Q8_MBX_CMD_VERSION; if (qla_mbx_cmd(ha, (uint32_t *) md_size, (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox, (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return (-1); } md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox; err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } *size = md_size_rsp->templ_size; return (0); } static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) { device_t dev; q80_get_port_cfg_t *pcfg; q80_get_port_cfg_rsp_t *pcfg_rsp; uint32_t err; dev = ha->pci_dev; pcfg = (q80_get_port_cfg_t *)ha->hw.mbox; bzero(pcfg, sizeof(q80_get_port_cfg_t)); pcfg->opcode = 
Q8_MBX_GET_PORT_CONFIG; pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2); pcfg->count_version |= Q8_MBX_CMD_VERSION; if (qla_mbx_cmd(ha, (uint32_t *)pcfg, (sizeof (q80_get_port_cfg_t) >> 2), ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } device_printf(dev, "%s: [cfg_bits, port type]" " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" " [0x%x, 0x%x, 0x%x]\n", __func__, pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type, ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0) ); *cfg_bits = pcfg_rsp->cfg_bits; return (0); } int ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) { struct ether_vlan_header *eh; uint16_t etype; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; struct tcphdr *th = NULL; uint32_t hdrlen; uint32_t offset; uint8_t buf[sizeof(struct ip6_hdr)]; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { hdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } if (etype == ETHERTYPE_IP) { offset = (hdrlen + sizeof (struct ip)); if (mp->m_len >= offset) { ip = (struct ip *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, sizeof (struct ip), buf); ip = (struct ip *)buf; } if (ip->ip_p == IPPROTO_TCP) { hdrlen += ip->ip_hl << 2; offset = hdrlen + 4; if (mp->m_len >= offset) { th = (struct tcphdr *)(mp->m_data + hdrlen);; } else { m_copydata(mp, hdrlen, 4, buf); th = (struct tcphdr *)buf; } } } else if (etype == ETHERTYPE_IPV6) { offset = (hdrlen + sizeof (struct ip6_hdr)); if (mp->m_len >= offset) { ip6 = (struct ip6_hdr *)(mp->m_data 
+ hdrlen); } else { m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf); ip6 = (struct ip6_hdr *)buf; } if (ip6->ip6_nxt == IPPROTO_TCP) { hdrlen += sizeof(struct ip6_hdr); offset = hdrlen + 4; if (mp->m_len >= offset) { th = (struct tcphdr *)(mp->m_data + hdrlen);; } else { m_copydata(mp, hdrlen, 4, buf); th = (struct tcphdr *)buf; } } } if (th != NULL) { if ((th->th_sport == htons(3260)) || (th->th_dport == htons(3260))) return 0; } return (-1); } void qla_hw_async_event(qla_host_t *ha) { switch (ha->hw.aen_mb0) { case 0x8101: (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2, ha->hw.aen_mb3, ha->hw.aen_mb4); break; default: break; } return; } #ifdef QL_LDFLASH_FW static int ql_get_minidump_template(qla_host_t *ha) { uint32_t err; device_t dev = ha->pci_dev; q80_config_md_templ_cmd_t *md_templ; q80_config_md_templ_cmd_rsp_t *md_templ_rsp; md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox; bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t))); md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT; md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2); md_templ->count_version |= Q8_MBX_CMD_VERSION; md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr; md_templ->buff_size = ha->hw.dma_buf.minidump.size; if (qla_mbx_cmd(ha, (uint32_t *) md_templ, (sizeof(q80_config_md_templ_cmd_t) >> 2), ha->hw.mbox, (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return (-1); } md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox; err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return (-1); } return (0); } #endif /* #ifdef QL_LDFLASH_FW */ /* * Minidump related functionality */ static int ql_parse_template(qla_host_t *ha); static uint32_t ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry, uint32_t * data_buff); static uint32_t ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry, uint32_t * data_buff); static uint32_t 
ql_pollrd_modify_write(qla_host_t *ha, ql_minidump_entry_rd_modify_wr_with_poll_t *entry, uint32_t *data_buff); static uint32_t ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t * data_buff); static uint32_t ql_L1Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t *data_buff); static uint32_t ql_rdocm(qla_host_t *ha, ql_minidump_entry_rdocm_t *ocmEntry, uint32_t *data_buff); static uint32_t ql_rdmem(qla_host_t *ha, ql_minidump_entry_rdmem_t *mem_entry, uint32_t *data_buff); static uint32_t ql_rdrom(qla_host_t *ha, ql_minidump_entry_rdrom_t *romEntry, uint32_t *data_buff); static uint32_t ql_rdmux(qla_host_t *ha, ql_minidump_entry_mux_t *muxEntry, uint32_t *data_buff); static uint32_t ql_rdmux2(qla_host_t *ha, ql_minidump_entry_mux2_t *muxEntry, uint32_t *data_buff); static uint32_t ql_rdqueue(qla_host_t *ha, ql_minidump_entry_queue_t *queueEntry, uint32_t *data_buff); static uint32_t ql_cntrl(qla_host_t *ha, ql_minidump_template_hdr_t *template_hdr, ql_minidump_entry_cntrl_t *crbEntry); static uint32_t ql_minidump_size(qla_host_t *ha) { uint32_t i, k; uint32_t size = 0; ql_minidump_template_hdr_t *hdr; hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b; i = 0x2; for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) { if (i & ha->hw.mdump_capture_mask) size += hdr->capture_size_array[k]; i = i << 1; } return (size); } static void ql_free_minidump_buffer(qla_host_t *ha) { if (ha->hw.mdump_buffer != NULL) { free(ha->hw.mdump_buffer, M_QLA83XXBUF); ha->hw.mdump_buffer = NULL; ha->hw.mdump_buffer_size = 0; } return; } static int ql_alloc_minidump_buffer(qla_host_t *ha) { ha->hw.mdump_buffer_size = ql_minidump_size(ha); if (!ha->hw.mdump_buffer_size) return (-1); ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF, M_NOWAIT); if (ha->hw.mdump_buffer == NULL) return (-1); return (0); } static void ql_free_minidump_template_buffer(qla_host_t *ha) { if (ha->hw.mdump_template != NULL) { 
free(ha->hw.mdump_template, M_QLA83XXBUF); ha->hw.mdump_template = NULL; ha->hw.mdump_template_size = 0; } return; } static int ql_alloc_minidump_template_buffer(qla_host_t *ha) { ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size; ha->hw.mdump_template = malloc(ha->hw.mdump_template_size, M_QLA83XXBUF, M_NOWAIT); if (ha->hw.mdump_template == NULL) return (-1); return (0); } static int ql_alloc_minidump_buffers(qla_host_t *ha) { int ret; ret = ql_alloc_minidump_template_buffer(ha); if (ret) return (ret); ret = ql_alloc_minidump_buffer(ha); if (ret) ql_free_minidump_template_buffer(ha); return (ret); } static uint32_t ql_validate_minidump_checksum(qla_host_t *ha) { uint64_t sum = 0; int count; uint32_t *template_buff; count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t); template_buff = ha->hw.dma_buf.minidump.dma_b; while (count-- > 0) { sum += *template_buff++; } while (sum >> 32) { sum = (sum & 0xFFFFFFFF) + (sum >> 32); } return (~sum); } int ql_minidump_init(qla_host_t *ha) { int ret = 0; uint32_t template_size = 0; device_t dev = ha->pci_dev; /* * Get Minidump Template Size */ ret = qla_get_minidump_tmplt_size(ha, &template_size); if (ret || (template_size == 0)) { device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret, template_size); return (-1); } /* * Allocate Memory for Minidump Template */ ha->hw.dma_buf.minidump.alignment = 8; ha->hw.dma_buf.minidump.size = template_size; #ifdef QL_LDFLASH_FW if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) { device_printf(dev, "%s: minidump dma alloc failed\n", __func__); return (-1); } ha->hw.dma_buf.flags.minidump = 1; /* * Retrieve Minidump Template */ ret = ql_get_minidump_template(ha); #else ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump; #endif /* #ifdef QL_LDFLASH_FW */ if (ret == 0) { ret = ql_validate_minidump_checksum(ha); if (ret == 0) { ret = ql_alloc_minidump_buffers(ha); if (ret == 0) ha->hw.mdump_init = 1; else device_printf(dev, "%s: ql_alloc_minidump_buffers" " failed\n", __func__); } 
else { device_printf(dev, "%s: ql_validate_minidump_checksum" " failed\n", __func__); } } else { device_printf(dev, "%s: ql_get_minidump_template failed\n", __func__); } if (ret) ql_minidump_free(ha); return (ret); } static void ql_minidump_free(qla_host_t *ha) { ha->hw.mdump_init = 0; if (ha->hw.dma_buf.flags.minidump) { ha->hw.dma_buf.flags.minidump = 0; ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump); } ql_free_minidump_template_buffer(ha); ql_free_minidump_buffer(ha); return; } void ql_minidump(qla_host_t *ha) { if (!ha->hw.mdump_init) return; if (ha->hw.mdump_done) return; - - ha->hw.mdump_start_seq_index = ql_stop_sequence(ha); + ha->hw.mdump_usec_ts = qla_get_usec_timestamp(); + ha->hw.mdump_start_seq_index = ql_stop_sequence(ha); bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size); bzero(ha->hw.mdump_template, ha->hw.mdump_template_size); bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template, ha->hw.mdump_template_size); ql_parse_template(ha); ql_start_sequence(ha, ha->hw.mdump_start_seq_index); ha->hw.mdump_done = 1; return; } /* * helper routines */ static void ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize) { if (esize != entry->hdr.entry_capture_size) { entry->hdr.entry_capture_size = esize; entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG; } return; } static int ql_parse_template(qla_host_t *ha) { uint32_t num_of_entries, buff_level, e_cnt, esize; uint32_t end_cnt, rv = 0; char *dump_buff, *dbuff; int sane_start = 0, sane_end = 0; ql_minidump_template_hdr_t *template_hdr; ql_minidump_entry_t *entry; uint32_t capture_mask; uint32_t dump_size; /* Setup parameters */ template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template; if (template_hdr->entry_type == TLHDR) sane_start = 1; dump_buff = (char *) ha->hw.mdump_buffer; num_of_entries = template_hdr->num_of_entries; entry = (ql_minidump_entry_t *) ((char *)template_hdr + template_hdr->first_entry_offset ); template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] = 
template_hdr->ocm_window_array[ha->pci_func]; template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func; capture_mask = ha->hw.mdump_capture_mask; dump_size = ha->hw.mdump_buffer_size; template_hdr->driver_capture_mask = capture_mask; QL_DPRINT80(ha, (ha->pci_dev, "%s: sane_start = %d num_of_entries = %d " "capture_mask = 0x%x dump_size = %d \n", __func__, sane_start, num_of_entries, capture_mask, dump_size)); for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) { /* * If the capture_mask of the entry does not match capture mask * skip the entry after marking the driver_flags indicator. */ if (!(entry->hdr.entry_capture_mask & capture_mask)) { entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; entry = (ql_minidump_entry_t *) ((char *) entry + entry->hdr.entry_size); continue; } /* * This is ONLY needed in implementations where * the capture buffer allocated is too small to capture * all of the required entries for a given capture mask. * We need to empty the buffer contents to a file * if possible, before processing the next entry * If the buff_full_flag is set, no further capture will happen * and all remaining non-control entries will be skipped. 
*/ if (entry->hdr.entry_capture_size != 0) { if ((buff_level + entry->hdr.entry_capture_size) > dump_size) { /* Try to recover by emptying buffer to file */ entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; entry = (ql_minidump_entry_t *) ((char *) entry + entry->hdr.entry_size); continue; } } /* * Decode the entry type and process it accordingly */ switch (entry->hdr.entry_type) { case RDNOP: break; case RDEND: if (sane_end == 0) { end_cnt = e_cnt; } sane_end++; break; case RDCRB: dbuff = dump_buff + buff_level; esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case POLLRD: dbuff = dump_buff + buff_level; esize = ql_pollrd(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case POLLRDMWR: dbuff = dump_buff + buff_level; esize = ql_pollrd_modify_write(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case L2ITG: case L2DTG: case L2DAT: case L2INS: dbuff = dump_buff + buff_level; esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff); if (esize == -1) { entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; } else { ql_entry_err_chk(entry, esize); buff_level += esize; } break; case L1DAT: case L1INS: dbuff = dump_buff + buff_level; esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case RDOCM: dbuff = dump_buff + buff_level; esize = ql_rdocm(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case RDMEM: dbuff = dump_buff + buff_level; esize = ql_rdmem(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case BOARD: case RDROM: dbuff = dump_buff + buff_level; esize = ql_rdrom(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case RDMUX: dbuff = dump_buff + buff_level; esize = ql_rdmux(ha, (void *)entry, (void *)dbuff); 
ql_entry_err_chk(entry, esize); buff_level += esize; break; case RDMUX2: dbuff = dump_buff + buff_level; esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case QUEUE: dbuff = dump_buff + buff_level; esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case CNTRL: if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) { entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; } break; default: entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; break; } /* next entry in the template */ entry = (ql_minidump_entry_t *) ((char *) entry + entry->hdr.entry_size); } if (!sane_start || (sane_end > 1)) { device_printf(ha->pci_dev, "\n%s: Template configuration error. Check Template\n", __func__); } QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n", __func__, template_hdr->num_of_entries)); return 0; } /* * Read CRB operation. */ static uint32_t ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry, uint32_t * data_buff) { int loop_cnt; int ret; uint32_t op_count, addr, stride, value = 0; addr = crb_entry->addr; op_count = crb_entry->op_count; stride = crb_entry->addr_stride; for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { ret = ql_rdwr_indreg32(ha, addr, &value, 1); if (ret) return (0); *data_buff++ = addr; *data_buff++ = value; addr = addr + stride; } /* * for testing purpose we return amount of data written */ return (op_count * (2 * sizeof(uint32_t))); } /* * Handle L2 Cache. 
*/ static uint32_t ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t * data_buff) { int i, k; int loop_cnt; int ret; uint32_t read_value; uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w; uint32_t tag_value, read_cnt; volatile uint8_t cntl_value_r; long timeout; uint32_t data; loop_cnt = cacheEntry->op_count; read_addr = cacheEntry->read_addr; cntrl_addr = cacheEntry->control_addr; cntl_value_w = (uint32_t) cacheEntry->write_value; tag_reg_addr = cacheEntry->tag_reg_addr; tag_value = cacheEntry->init_tag_value; read_cnt = cacheEntry->read_addr_cnt; for (i = 0; i < loop_cnt; i++) { ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0); if (ret) return (0); if (cacheEntry->write_value != 0) { ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0); if (ret) return (0); } if (cacheEntry->poll_mask != 0) { timeout = cacheEntry->poll_wait; ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1); if (ret) return (0); cntl_value_r = (uint8_t)data; while ((cntl_value_r & cacheEntry->poll_mask) != 0) { if (timeout) { qla_mdelay(__func__, 1); timeout--; } else break; ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1); if (ret) return (0); cntl_value_r = (uint8_t)data; } if (!timeout) { /* Report timeout error. * core dump capture failed * Skip remaining entries. * Write buffer out to file * Use driver specific fields in template header * to report this error. */ return (-1); } } addr = read_addr; for (k = 0; k < read_cnt; k++) { ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); *data_buff++ = read_value; addr += cacheEntry->read_addr_stride; } tag_value += cacheEntry->tag_value_stride; } return (read_cnt * loop_cnt * sizeof(uint32_t)); } /* * Handle L1 Cache. 
*/ static uint32_t ql_L1Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t *data_buff) { int ret; int i, k; int loop_cnt; uint32_t read_value; uint32_t addr, read_addr, cntrl_addr, tag_reg_addr; uint32_t tag_value, read_cnt; uint32_t cntl_value_w; loop_cnt = cacheEntry->op_count; read_addr = cacheEntry->read_addr; cntrl_addr = cacheEntry->control_addr; cntl_value_w = (uint32_t) cacheEntry->write_value; tag_reg_addr = cacheEntry->tag_reg_addr; tag_value = cacheEntry->init_tag_value; read_cnt = cacheEntry->read_addr_cnt; for (i = 0; i < loop_cnt; i++) { ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0); if (ret) return (0); addr = read_addr; for (k = 0; k < read_cnt; k++) { ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); *data_buff++ = read_value; addr += cacheEntry->read_addr_stride; } tag_value += cacheEntry->tag_value_stride; } return (read_cnt * loop_cnt * sizeof(uint32_t)); } /* * Reading OCM memory */ static uint32_t ql_rdocm(qla_host_t *ha, ql_minidump_entry_rdocm_t *ocmEntry, uint32_t *data_buff) { int i, loop_cnt; volatile uint32_t addr; volatile uint32_t value; addr = ocmEntry->read_addr; loop_cnt = ocmEntry->op_count; for (i = 0; i < loop_cnt; i++) { value = READ_REG32(ha, addr); *data_buff++ = value; addr += ocmEntry->read_addr_stride; } return (loop_cnt * sizeof(value)); } /* * Read memory */ static uint32_t ql_rdmem(qla_host_t *ha, ql_minidump_entry_rdmem_t *mem_entry, uint32_t *data_buff) { int ret; int i, loop_cnt; volatile uint32_t addr; q80_offchip_mem_val_t val; addr = mem_entry->read_addr; /* size in bytes / 16 */ loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4); for (i = 0; i < loop_cnt; i++) { ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1); if (ret) return (0); *data_buff++ = val.data_lo; *data_buff++ = val.data_hi; *data_buff++ = val.data_ulo; *data_buff++ = val.data_uhi; addr += 
(sizeof(uint32_t) * 4); } return (loop_cnt * (sizeof(uint32_t) * 4)); } /* * Read Rom */ static uint32_t ql_rdrom(qla_host_t *ha, ql_minidump_entry_rdrom_t *romEntry, uint32_t *data_buff) { int ret; int i, loop_cnt; uint32_t addr; uint32_t value; addr = romEntry->read_addr; loop_cnt = romEntry->read_data_size; /* This is size in bytes */ loop_cnt /= sizeof(value); for (i = 0; i < loop_cnt; i++) { ret = ql_rd_flash32(ha, addr, &value); if (ret) return (0); *data_buff++ = value; addr += sizeof(value); } return (loop_cnt * sizeof(value)); } /* * Read MUX data */ static uint32_t ql_rdmux(qla_host_t *ha, ql_minidump_entry_mux_t *muxEntry, uint32_t *data_buff) { int ret; int loop_cnt; uint32_t read_value, sel_value; uint32_t read_addr, select_addr; select_addr = muxEntry->select_addr; sel_value = muxEntry->select_value; read_addr = muxEntry->read_addr; for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) { ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); if (ret) return (0); *data_buff++ = sel_value; *data_buff++ = read_value; sel_value += muxEntry->select_value_stride; } return (loop_cnt * (2 * sizeof(uint32_t))); } static uint32_t ql_rdmux2(qla_host_t *ha, ql_minidump_entry_mux2_t *muxEntry, uint32_t *data_buff) { int ret; int loop_cnt; uint32_t select_addr_1, select_addr_2; uint32_t select_value_1, select_value_2; uint32_t select_value_count, select_value_mask; uint32_t read_addr, read_value; select_addr_1 = muxEntry->select_addr_1; select_addr_2 = muxEntry->select_addr_2; select_value_1 = muxEntry->select_value_1; select_value_2 = muxEntry->select_value_2; select_value_count = muxEntry->select_value_count; select_value_mask = muxEntry->select_value_mask; read_addr = muxEntry->read_addr; for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count; loop_cnt++) { uint32_t temp_sel_val; ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0); if (ret) return (0); temp_sel_val 
= select_value_1 & select_value_mask; ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); if (ret) return (0); *data_buff++ = temp_sel_val; *data_buff++ = read_value; ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0); if (ret) return (0); temp_sel_val = select_value_2 & select_value_mask; ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); if (ret) return (0); *data_buff++ = temp_sel_val; *data_buff++ = read_value; select_value_1 += muxEntry->select_value_stride; select_value_2 += muxEntry->select_value_stride; } return (loop_cnt * (4 * sizeof(uint32_t))); } /* * Handling Queue State Reads. */ static uint32_t ql_rdqueue(qla_host_t *ha, ql_minidump_entry_queue_t *queueEntry, uint32_t *data_buff) { int ret; int loop_cnt, k; uint32_t read_value; uint32_t read_addr, read_stride, select_addr; uint32_t queue_id, read_cnt; read_cnt = queueEntry->read_addr_cnt; read_stride = queueEntry->read_addr_stride; select_addr = queueEntry->select_addr; for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count; loop_cnt++) { ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0); if (ret) return (0); read_addr = queueEntry->read_addr; for (k = 0; k < read_cnt; k++) { ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); if (ret) return (0); *data_buff++ = read_value; read_addr += read_stride; } queue_id += queueEntry->queue_id_stride; } return (loop_cnt * (read_cnt * sizeof(uint32_t))); } /* * Handling control entries. 
*/ static uint32_t ql_cntrl(qla_host_t *ha, ql_minidump_template_hdr_t *template_hdr, ql_minidump_entry_cntrl_t *crbEntry) { int ret; int count; uint32_t opcode, read_value, addr, entry_addr; long timeout; entry_addr = crbEntry->addr; for (count = 0; count < crbEntry->op_count; count++) { opcode = crbEntry->opcode; if (opcode & QL_DBG_OPCODE_WR) { ret = ql_rdwr_indreg32(ha, entry_addr, &crbEntry->value_1, 0); if (ret) return (0); opcode &= ~QL_DBG_OPCODE_WR; } if (opcode & QL_DBG_OPCODE_RW) { ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); if (ret) return (0); ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); if (ret) return (0); opcode &= ~QL_DBG_OPCODE_RW; } if (opcode & QL_DBG_OPCODE_AND) { ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); if (ret) return (0); read_value &= crbEntry->value_2; opcode &= ~QL_DBG_OPCODE_AND; if (opcode & QL_DBG_OPCODE_OR) { read_value |= crbEntry->value_3; opcode &= ~QL_DBG_OPCODE_OR; } ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); if (ret) return (0); } if (opcode & QL_DBG_OPCODE_OR) { ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); if (ret) return (0); read_value |= crbEntry->value_3; ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); if (ret) return (0); opcode &= ~QL_DBG_OPCODE_OR; } if (opcode & QL_DBG_OPCODE_POLL) { opcode &= ~QL_DBG_OPCODE_POLL; timeout = crbEntry->poll_timeout; addr = entry_addr; ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); while ((read_value & crbEntry->value_2) != crbEntry->value_1) { if (timeout) { qla_mdelay(__func__, 1); timeout--; } else break; ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); } if (!timeout) { /* * Report timeout error. * core dump capture failed * Skip remaining entries. * Write buffer out to file * Use driver specific fields in template header * to report this error. */ return (-1); } } if (opcode & QL_DBG_OPCODE_RDSTATE) { /* * decide which address to use. 
*/ if (crbEntry->state_index_a) { addr = template_hdr->saved_state_array[ crbEntry-> state_index_a]; } else { addr = entry_addr; } ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); template_hdr->saved_state_array[crbEntry->state_index_v] = read_value; opcode &= ~QL_DBG_OPCODE_RDSTATE; } if (opcode & QL_DBG_OPCODE_WRSTATE) { /* * decide which value to use. */ if (crbEntry->state_index_v) { read_value = template_hdr->saved_state_array[ crbEntry->state_index_v]; } else { read_value = crbEntry->value_1; } /* * decide which address to use. */ if (crbEntry->state_index_a) { addr = template_hdr->saved_state_array[ crbEntry-> state_index_a]; } else { addr = entry_addr; } ret = ql_rdwr_indreg32(ha, addr, &read_value, 0); if (ret) return (0); opcode &= ~QL_DBG_OPCODE_WRSTATE; } if (opcode & QL_DBG_OPCODE_MDSTATE) { /* Read value from saved state using index */ read_value = template_hdr->saved_state_array[ crbEntry->state_index_v]; read_value <<= crbEntry->shl; /*Shift left operation */ read_value >>= crbEntry->shr; /*Shift right operation */ if (crbEntry->value_2) { /* check if AND mask is provided */ read_value &= crbEntry->value_2; } read_value |= crbEntry->value_3; /* OR operation */ read_value += crbEntry->value_1; /* increment op */ /* Write value back to state area. */ template_hdr->saved_state_array[crbEntry->state_index_v] = read_value; opcode &= ~QL_DBG_OPCODE_MDSTATE; } entry_addr += crbEntry->addr_stride; } return (0); } /* * Handling rd poll entry. 
*/ static uint32_t ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry, uint32_t *data_buff) { int ret; int loop_cnt; uint32_t op_count, select_addr, select_value_stride, select_value; uint32_t read_addr, poll, mask, data_size, data; uint32_t wait_count = 0; select_addr = entry->select_addr; read_addr = entry->read_addr; select_value = entry->select_value; select_value_stride = entry->select_value_stride; op_count = entry->op_count; poll = entry->poll; mask = entry->mask; data_size = entry->data_size; for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0); if (ret) return (0); wait_count = 0; while (wait_count < poll) { uint32_t temp; ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1); if (ret) return (0); if ( (temp & mask) != 0 ) { break; } wait_count++; } if (wait_count == poll) { device_printf(ha->pci_dev, "%s: Error in processing entry\n", __func__); device_printf(ha->pci_dev, "%s: wait_count <0x%x> poll <0x%x>\n", __func__, wait_count, poll); return 0; } ret = ql_rdwr_indreg32(ha, read_addr, &data, 1); if (ret) return (0); *data_buff++ = select_value; *data_buff++ = data; select_value = select_value + select_value_stride; } /* * for testing purpose we return amount of data written */ return (loop_cnt * (2 * sizeof(uint32_t))); } /* * Handling rd modify write poll entry. 
*/ static uint32_t ql_pollrd_modify_write(qla_host_t *ha, ql_minidump_entry_rd_modify_wr_with_poll_t *entry, uint32_t *data_buff) { int ret; uint32_t addr_1, addr_2, value_1, value_2, data; uint32_t poll, mask, data_size, modify_mask; uint32_t wait_count = 0; addr_1 = entry->addr_1; addr_2 = entry->addr_2; value_1 = entry->value_1; value_2 = entry->value_2; poll = entry->poll; mask = entry->mask; modify_mask = entry->modify_mask; data_size = entry->data_size; ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0); if (ret) return (0); wait_count = 0; while (wait_count < poll) { uint32_t temp; ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); if (ret) return (0); if ( (temp & mask) != 0 ) { break; } wait_count++; } if (wait_count == poll) { device_printf(ha->pci_dev, "%s Error in processing entry\n", __func__); } else { ret = ql_rdwr_indreg32(ha, addr_2, &data, 1); if (ret) return (0); data = (data & modify_mask); ret = ql_rdwr_indreg32(ha, addr_2, &data, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0); if (ret) return (0); /* Poll again */ wait_count = 0; while (wait_count < poll) { uint32_t temp; ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); if (ret) return (0); if ( (temp & mask) != 0 ) { break; } wait_count++; } *data_buff++ = addr_2; *data_buff++ = data; } /* * for testing purpose we return amount of data written */ return (2 * sizeof(uint32_t)); } Index: stable/9/sys/dev/qlxgbe/ql_hw.h =================================================================== --- stable/9/sys/dev/qlxgbe/ql_hw.h (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_hw.h (revision 330557) @@ -1,1764 +1,1780 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: ql_hw.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
*/ #ifndef _QL_HW_H_ #define _QL_HW_H_ /* * PCIe Registers; Direct Mapped; Offsets from BAR0 */ /* * Register offsets for QLE8030 */ /* * Firmware Mailbox Registers * 0 thru 511; offsets 0x800 thru 0xFFC; 32bits each */ #define Q8_FW_MBOX0 0x00000800 #define Q8_FW_MBOX511 0x00000FFC /* * Host Mailbox Registers * 0 thru 511; offsets 0x000 thru 0x7FC; 32bits each */ #define Q8_HOST_MBOX0 0x00000000 #define Q8_HOST_MBOX511 0x000007FC #define Q8_MBOX_INT_ENABLE 0x00001000 #define Q8_MBOX_INT_MASK_MSIX 0x00001200 #define Q8_MBOX_INT_LEGACY 0x00003010 #define Q8_HOST_MBOX_CNTRL 0x00003038 #define Q8_FW_MBOX_CNTRL 0x0000303C #define Q8_PEG_HALT_STATUS1 0x000034A8 #define Q8_PEG_HALT_STATUS2 0x000034AC #define Q8_FIRMWARE_HEARTBEAT 0x000034B0 #define Q8_FLASH_LOCK_ID 0x00003500 #define Q8_DRIVER_LOCK_ID 0x00003504 #define Q8_FW_CAPABILITIES 0x00003528 #define Q8_FW_VER_MAJOR 0x00003550 #define Q8_FW_VER_MINOR 0x00003554 #define Q8_FW_VER_SUB 0x00003558 #define Q8_BOOTLD_ADDR 0x0000355C #define Q8_BOOTLD_SIZE 0x00003560 #define Q8_FW_IMAGE_ADDR 0x00003564 #define Q8_FW_BUILD_NUMBER 0x00003568 #define Q8_FW_IMAGE_VALID 0x000035FC #define Q8_CMDPEG_STATE 0x00003650 #define Q8_LINK_STATE 0x00003698 #define Q8_LINK_STATE_2 0x0000369C #define Q8_LINK_SPEED_0 0x000036E0 #define Q8_LINK_SPEED_1 0x000036E4 #define Q8_LINK_SPEED_2 0x000036E8 #define Q8_LINK_SPEED_3 0x000036EC #define Q8_MAX_LINK_SPEED_0 0x000036F0 #define Q8_MAX_LINK_SPEED_1 0x000036F4 #define Q8_MAX_LINK_SPEED_2 0x000036F8 #define Q8_MAX_LINK_SPEED_3 0x000036FC #define Q8_ASIC_TEMPERATURE 0x000037B4 /* * CRB Window Registers * 0 thru 15; offsets 0x3800 thru 0x383C; 32bits each */ #define Q8_CRB_WINDOW_PF0 0x00003800 #define Q8_CRB_WINDOW_PF15 0x0000383C #define Q8_FLASH_LOCK 0x00003850 #define Q8_FLASH_UNLOCK 0x00003854 #define Q8_DRIVER_LOCK 0x00003868 #define Q8_DRIVER_UNLOCK 0x0000386C #define Q8_LEGACY_INT_PTR 0x000038C0 #define Q8_LEGACY_INT_TRIG 0x000038C4 #define Q8_LEGACY_INT_MASK 0x000038C8 #define 
Q8_WILD_CARD 0x000038F0 #define Q8_INFORMANT 0x000038FC /* * Ethernet Interface Specific Registers */ #define Q8_DRIVER_OP_MODE 0x00003570 #define Q8_API_VERSION 0x0000356C #define Q8_NPAR_STATE 0x0000359C /* * End of PCIe Registers; Direct Mapped; Offsets from BAR0 */ /* * Indirect Registers */ #define Q8_LED_DUAL_0 0x28084C80 #define Q8_LED_SINGLE_0 0x28084C90 #define Q8_LED_DUAL_1 0x28084CA0 #define Q8_LED_SINGLE_1 0x28084CB0 #define Q8_LED_DUAL_2 0x28084CC0 #define Q8_LED_SINGLE_2 0x28084CD0 #define Q8_LED_DUAL_3 0x28084CE0 #define Q8_LED_SINGLE_3 0x28084CF0 #define Q8_GPIO_1 0x28084D00 #define Q8_GPIO_2 0x28084D10 #define Q8_GPIO_3 0x28084D20 #define Q8_GPIO_4 0x28084D40 #define Q8_GPIO_5 0x28084D50 #define Q8_GPIO_6 0x28084D60 #define Q8_GPIO_7 0x42100060 #define Q8_GPIO_8 0x42100064 #define Q8_FLASH_SPI_STATUS 0x2808E010 #define Q8_FLASH_SPI_CONTROL 0x2808E014 #define Q8_FLASH_STATUS 0x42100004 #define Q8_FLASH_CONTROL 0x42110004 #define Q8_FLASH_ADDRESS 0x42110008 #define Q8_FLASH_WR_DATA 0x4211000C #define Q8_FLASH_RD_DATA 0x42110018 #define Q8_FLASH_DIRECT_WINDOW 0x42110030 #define Q8_FLASH_DIRECT_DATA 0x42150000 #define Q8_MS_CNTRL 0x41000090 #define Q8_MS_ADDR_LO 0x41000094 #define Q8_MS_ADDR_HI 0x41000098 #define Q8_MS_WR_DATA_0_31 0x410000A0 #define Q8_MS_WR_DATA_32_63 0x410000A4 #define Q8_MS_WR_DATA_64_95 0x410000B0 #define Q8_MS_WR_DATA_96_127 0x410000B4 #define Q8_MS_RD_DATA_0_31 0x410000A8 #define Q8_MS_RD_DATA_32_63 0x410000AC #define Q8_MS_RD_DATA_64_95 0x410000B8 #define Q8_MS_RD_DATA_96_127 0x410000BC #define Q8_CRB_PEG_0 0x3400003c #define Q8_CRB_PEG_1 0x3410003c #define Q8_CRB_PEG_2 0x3420003c #define Q8_CRB_PEG_3 0x3430003c #define Q8_CRB_PEG_4 0x34B0003c /* * Macros for reading and writing registers */ #if defined(__i386__) || defined(__amd64__) #define Q8_MB() __asm volatile("mfence" ::: "memory") #define Q8_WMB() __asm volatile("sfence" ::: "memory") #define Q8_RMB() __asm volatile("lfence" ::: "memory") #else #define Q8_MB() #define 
Q8_WMB() #define Q8_RMB() #endif #define READ_REG32(ha, reg) bus_read_4((ha->pci_reg), reg) #define WRITE_REG32(ha, reg, val) \ {\ bus_write_4((ha->pci_reg), reg, val);\ bus_read_4((ha->pci_reg), reg);\ } #define Q8_NUM_MBOX 512 #define Q8_MAX_NUM_MULTICAST_ADDRS 1022 #define Q8_MAC_ADDR_LEN 6 /* * Firmware Interface */ /* * Command Response Interface - Commands */ #define Q8_MBX_CONFIG_IP_ADDRESS 0x0001 #define Q8_MBX_CONFIG_INTR 0x0002 #define Q8_MBX_MAP_INTR_SRC 0x0003 #define Q8_MBX_MAP_SDS_TO_RDS 0x0006 #define Q8_MBX_CREATE_RX_CNTXT 0x0007 #define Q8_MBX_DESTROY_RX_CNTXT 0x0008 #define Q8_MBX_CREATE_TX_CNTXT 0x0009 #define Q8_MBX_DESTROY_TX_CNTXT 0x000A #define Q8_MBX_ADD_RX_RINGS 0x000B #define Q8_MBX_CONFIG_LRO_FLOW 0x000C #define Q8_MBX_CONFIG_MAC_LEARNING 0x000D #define Q8_MBX_GET_STATS 0x000F #define Q8_MBX_GENERATE_INTR 0x0011 #define Q8_MBX_SET_MAX_MTU 0x0012 #define Q8_MBX_MAC_ADDR_CNTRL 0x001F #define Q8_MBX_GET_PCI_CONFIG 0x0020 #define Q8_MBX_GET_NIC_PARTITION 0x0021 #define Q8_MBX_SET_NIC_PARTITION 0x0022 #define Q8_MBX_QUERY_WOL_CAP 0x002C #define Q8_MBX_SET_WOL_CONFIG 0x002D #define Q8_MBX_GET_MINIDUMP_TMPLT_SIZE 0x002F #define Q8_MBX_GET_MINIDUMP_TMPLT 0x0030 #define Q8_MBX_GET_FW_DCBX_CAPS 0x0034 #define Q8_MBX_QUERY_DCBX_SETTINGS 0x0035 #define Q8_MBX_CONFIG_RSS 0x0041 #define Q8_MBX_CONFIG_RSS_TABLE 0x0042 #define Q8_MBX_CONFIG_INTR_COALESCE 0x0043 #define Q8_MBX_CONFIG_LED 0x0044 #define Q8_MBX_CONFIG_MAC_ADDR 0x0045 #define Q8_MBX_CONFIG_STATISTICS 0x0046 #define Q8_MBX_CONFIG_LOOPBACK 0x0047 #define Q8_MBX_LINK_EVENT_REQ 0x0048 #define Q8_MBX_CONFIG_MAC_RX_MODE 0x0049 #define Q8_MBX_CONFIG_FW_LRO 0x004A #define Q8_MBX_HW_CONFIG 0x004C #define Q8_MBX_INIT_NIC_FUNC 0x0060 #define Q8_MBX_STOP_NIC_FUNC 0x0061 #define Q8_MBX_IDC_REQ 0x0062 #define Q8_MBX_IDC_ACK 0x0063 #define Q8_MBX_SET_PORT_CONFIG 0x0066 #define Q8_MBX_GET_PORT_CONFIG 0x0067 #define Q8_MBX_GET_LINK_STATUS 0x0068 /* * Mailbox Command Response */ #define Q8_MBX_RSP_SUCCESS 
0x0001 #define Q8_MBX_RSP_RESPONSE_FAILURE 0x0002 #define Q8_MBX_RSP_NO_CARD_CRB 0x0003 #define Q8_MBX_RSP_NO_CARD_MEM 0x0004 #define Q8_MBX_RSP_NO_CARD_RSRC 0x0005 #define Q8_MBX_RSP_INVALID_ARGS 0x0006 #define Q8_MBX_RSP_INVALID_ACTION 0x0007 #define Q8_MBX_RSP_INVALID_STATE 0x0008 #define Q8_MBX_RSP_NOT_SUPPORTED 0x0009 #define Q8_MBX_RSP_NOT_PERMITTED 0x000A #define Q8_MBX_RSP_NOT_READY 0x000B #define Q8_MBX_RSP_DOES_NOT_EXIST 0x000C #define Q8_MBX_RSP_ALREADY_EXISTS 0x000D #define Q8_MBX_RSP_BAD_SIGNATURE 0x000E #define Q8_MBX_RSP_CMD_NOT_IMPLEMENTED 0x000F #define Q8_MBX_RSP_CMD_INVALID 0x0010 #define Q8_MBX_RSP_TIMEOUT 0x0011 #define Q8_MBX_RSP_CMD_FAILED 0x0012 #define Q8_MBX_RSP_FATAL_TEMP 0x0013 #define Q8_MBX_RSP_MAX_EXCEEDED 0x0014 #define Q8_MBX_RSP_UNSPECIFIED 0x0015 #define Q8_MBX_RSP_INTR_CREATE_FAILED 0x0017 #define Q8_MBX_RSP_INTR_DELETE_FAILED 0x0018 #define Q8_MBX_RSP_INTR_INVALID_OP 0x0019 #define Q8_MBX_RSP_IDC_INTRMD_RSP 0x001A #define Q8_MBX_CMD_VERSION (0x2 << 13) #define Q8_MBX_RSP_STATUS(x) (((!(x >> 9)) || ((x >> 9) == 1)) ? 
0: (x >> 9)) /* * Configure IP Address */ typedef struct _q80_config_ip_addr { uint16_t opcode; uint16_t count_version; uint8_t cmd; #define Q8_MBX_CONFIG_IP_ADD_IP 0x1 #define Q8_MBX_CONFIG_IP_DEL_IP 0x2 uint8_t ip_type; #define Q8_MBX_CONFIG_IP_V4 0x0 #define Q8_MBX_CONFIG_IP_V6 0x1 uint16_t rsrvd; union { struct { uint32_t addr; uint32_t rsrvd[3]; } ipv4; uint8_t ipv6_addr[16]; } u; } __packed q80_config_ip_addr_t; typedef struct _q80_config_ip_addr_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_config_ip_addr_rsp_t; /* * Configure Interrupt Command */ typedef struct _q80_intr { uint8_t cmd_type; #define Q8_MBX_CONFIG_INTR_CREATE 0x1 #define Q8_MBX_CONFIG_INTR_DELETE 0x2 #define Q8_MBX_CONFIG_INTR_TYPE_LINE (0x1 << 4) #define Q8_MBX_CONFIG_INTR_TYPE_MSI_X (0x3 << 4) uint8_t rsrvd; uint16_t msix_index; } __packed q80_intr_t; #define Q8_MAX_INTR_VECTORS 16 typedef struct _q80_config_intr { uint16_t opcode; uint16_t count_version; uint8_t nentries; uint8_t rsrvd[3]; q80_intr_t intr[Q8_MAX_INTR_VECTORS]; } __packed q80_config_intr_t; typedef struct _q80_intr_rsp { uint8_t status; uint8_t cmd; uint16_t intr_id; uint32_t intr_src; } q80_intr_rsp_t; typedef struct _q80_config_intr_rsp { uint16_t opcode; uint16_t regcnt_status; uint8_t nentries; uint8_t rsrvd[3]; q80_intr_rsp_t intr[Q8_MAX_INTR_VECTORS]; } __packed q80_config_intr_rsp_t; /* * Configure LRO Flow Command */ typedef struct _q80_config_lro_flow { uint16_t opcode; uint16_t count_version; uint8_t cmd; #define Q8_MBX_CONFIG_LRO_FLOW_ADD 0x01 #define Q8_MBX_CONFIG_LRO_FLOW_DELETE 0x02 uint8_t type_ts; #define Q8_MBX_CONFIG_LRO_FLOW_IPV4 0x00 #define Q8_MBX_CONFIG_LRO_FLOW_IPV6 0x01 #define Q8_MBX_CONFIG_LRO_FLOW_TS_ABSENT 0x00 #define Q8_MBX_CONFIG_LRO_FLOW_TS_PRESENT 0x02 uint16_t rsrvd; union { struct { uint32_t addr; uint32_t rsrvd[3]; } ipv4; uint8_t ipv6_addr[16]; } dst; union { struct { uint32_t addr; uint32_t rsrvd[3]; } ipv4; uint8_t ipv6_addr[16]; } src; uint16_t dst_port; uint16_t 
src_port; } __packed q80_config_lro_flow_t; typedef struct _q80_config_lro_flow_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_config_lro_flow_rsp_t; typedef struct _q80_set_max_mtu { uint16_t opcode; uint16_t count_version; uint32_t cntxt_id; uint32_t mtu; } __packed q80_set_max_mtu_t; typedef struct _q80_set_max_mtu_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_set_max_mtu_rsp_t; /* * Configure RSS */ typedef struct _q80_config_rss { uint16_t opcode; uint16_t count_version; uint16_t cntxt_id; uint16_t rsrvd; uint8_t hash_type; #define Q8_MBX_RSS_HASH_TYPE_IPV4_IP (0x1 << 4) #define Q8_MBX_RSS_HASH_TYPE_IPV4_TCP (0x2 << 4) #define Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP (0x3 << 4) #define Q8_MBX_RSS_HASH_TYPE_IPV6_IP (0x1 << 6) #define Q8_MBX_RSS_HASH_TYPE_IPV6_TCP (0x2 << 6) #define Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP (0x3 << 6) uint8_t flags; #define Q8_MBX_RSS_FLAGS_ENABLE_RSS (0x1) #define Q8_MBX_RSS_FLAGS_USE_IND_TABLE (0x2) #define Q8_MBX_RSS_FLAGS_TYPE_CRSS (0x4) uint16_t indtbl_mask; #define Q8_MBX_RSS_INDTBL_MASK 0x7F #define Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID 0x8000 uint32_t multi_rss; #define Q8_MBX_RSS_MULTI_RSS_ENGINE_ASSIGN BIT_30 #define Q8_MBX_RSS_USE_MULTI_RSS_ENGINES BIT_31 uint64_t rss_key[5]; } __packed q80_config_rss_t; typedef struct _q80_config_rss_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_config_rss_rsp_t; /* * Configure RSS Indirection Table */ #define Q8_RSS_IND_TBL_SIZE 40 #define Q8_RSS_IND_TBL_MIN_IDX 0 #define Q8_RSS_IND_TBL_MAX_IDX 127 typedef struct _q80_config_rss_ind_table { uint16_t opcode; uint16_t count_version; uint8_t start_idx; uint8_t end_idx; uint16_t cntxt_id; uint8_t ind_table[Q8_RSS_IND_TBL_SIZE]; } __packed q80_config_rss_ind_table_t; typedef struct _q80_config_rss_ind_table_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_config_rss_ind_table_rsp_t; /* * Configure Interrupt Coalescing and Generation */ typedef struct _q80_config_intr_coalesc { uint16_t opcode; 
uint16_t count_version; uint16_t flags; #define Q8_MBX_INTRC_FLAGS_RCV 1 #define Q8_MBX_INTRC_FLAGS_XMT 2 #define Q8_MBX_INTRC_FLAGS_PERIODIC (1 << 3) uint16_t cntxt_id; uint16_t max_pkts; uint16_t max_mswait; uint8_t timer_type; #define Q8_MBX_INTRC_TIMER_NONE 0 #define Q8_MBX_INTRC_TIMER_SINGLE 1 #define Q8_MBX_INTRC_TIMER_PERIODIC 2 uint16_t sds_ring_mask; uint8_t rsrvd; uint32_t ms_timeout; } __packed q80_config_intr_coalesc_t; typedef struct _q80_config_intr_coalesc_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_config_intr_coalesc_rsp_t; /* * Configure MAC Address */ #define Q8_ETHER_ADDR_LEN 6 typedef struct _q80_mac_addr { uint8_t addr[Q8_ETHER_ADDR_LEN]; uint16_t vlan_tci; } __packed q80_mac_addr_t; #define Q8_MAX_MAC_ADDRS 64 typedef struct _q80_config_mac_addr { uint16_t opcode; uint16_t count_version; uint8_t cmd; #define Q8_MBX_CMAC_CMD_ADD_MAC_ADDR 1 #define Q8_MBX_CMAC_CMD_DEL_MAC_ADDR 2 #define Q8_MBX_CMAC_CMD_CAM_BOTH (0x0 << 6) #define Q8_MBX_CMAC_CMD_CAM_INGRESS (0x1 << 6) #define Q8_MBX_CMAC_CMD_CAM_EGRESS (0x2 << 6) uint8_t nmac_entries; uint16_t cntxt_id; q80_mac_addr_t mac_addr[Q8_MAX_MAC_ADDRS]; } __packed q80_config_mac_addr_t; typedef struct _q80_config_mac_addr_rsp { uint16_t opcode; uint16_t regcnt_status; uint8_t cmd; uint8_t nmac_entries; uint16_t cntxt_id; uint32_t status[Q8_MAX_MAC_ADDRS]; } __packed q80_config_mac_addr_rsp_t; /* * Configure MAC Receive Mode */ typedef struct _q80_config_mac_rcv_mode { uint16_t opcode; uint16_t count_version; uint8_t mode; #define Q8_MBX_MAC_RCV_PROMISC_ENABLE 0x1 #define Q8_MBX_MAC_ALL_MULTI_ENABLE 0x2 uint8_t rsrvd; uint16_t cntxt_id; } __packed q80_config_mac_rcv_mode_t; typedef struct _q80_config_mac_rcv_mode_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_config_mac_rcv_mode_rsp_t; /* * Configure Firmware Controlled LRO */ typedef struct _q80_config_fw_lro { uint16_t opcode; uint16_t count_version; uint8_t flags; #define Q8_MBX_FW_LRO_IPV4 0x1 #define 
Q8_MBX_FW_LRO_IPV6 0x2 #define Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK 0x4 #define Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK 0x8 #define Q8_MBX_FW_LRO_LOW_THRESHOLD 0x10 uint8_t rsrvd; uint16_t cntxt_id; uint16_t low_threshold; uint16_t rsrvd0; } __packed q80_config_fw_lro_t; typedef struct _q80_config_fw_lro_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_config_fw_lro_rsp_t; /* * Minidump mailbox commands */ typedef struct _q80_config_md_templ_size { uint16_t opcode; uint16_t count_version; } __packed q80_config_md_templ_size_t; typedef struct _q80_config_md_templ_size_rsp { uint16_t opcode; uint16_t regcnt_status; uint32_t rsrvd; uint32_t templ_size; uint32_t templ_version; } __packed q80_config_md_templ_size_rsp_t; typedef struct _q80_config_md_templ_cmd { uint16_t opcode; uint16_t count_version; uint64_t buf_addr; /* physical address of buffer */ uint32_t buff_size; uint32_t offset; } __packed q80_config_md_templ_cmd_t; typedef struct _q80_config_md_templ_cmd_rsp { uint16_t opcode; uint16_t regcnt_status; uint32_t rsrvd; uint32_t templ_size; uint32_t buff_size; uint32_t offset; } __packed q80_config_md_templ_cmd_rsp_t; /* * Hardware Configuration Commands */ typedef struct _q80_hw_config { uint16_t opcode; uint16_t count_version; #define Q8_HW_CONFIG_SET_MDIO_REG_COUNT 0x06 #define Q8_HW_CONFIG_GET_MDIO_REG_COUNT 0x05 #define Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT 0x03 #define Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT 0x02 #define Q8_HW_CONFIG_SET_TEMP_THRESHOLD_COUNT 0x03 #define Q8_HW_CONFIG_GET_TEMP_THRESHOLD_COUNT 0x02 #define Q8_HW_CONFIG_GET_ECC_COUNTS_COUNT 0x02 uint32_t cmd; #define Q8_HW_CONFIG_SET_MDIO_REG 0x01 #define Q8_HW_CONFIG_GET_MDIO_REG 0x02 #define Q8_HW_CONFIG_SET_CAM_SEARCH_MODE 0x03 #define Q8_HW_CONFIG_GET_CAM_SEARCH_MODE 0x04 #define Q8_HW_CONFIG_SET_TEMP_THRESHOLD 0x07 #define Q8_HW_CONFIG_GET_TEMP_THRESHOLD 0x08 #define Q8_HW_CONFIG_GET_ECC_COUNTS 0x0A union { struct { uint32_t phys_port_number; uint32_t phy_dev_addr; uint32_t reg_addr; 
uint32_t data; } set_mdio; struct { uint32_t phys_port_number; uint32_t phy_dev_addr; uint32_t reg_addr; } get_mdio; struct { uint32_t mode; #define Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL 0x1 #define Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO 0x2 } set_cam_search_mode; struct { uint32_t value; } set_temp_threshold; } u; } __packed q80_hw_config_t; typedef struct _q80_hw_config_rsp { uint16_t opcode; uint16_t regcnt_status; union { struct { uint32_t value; } get_mdio; struct { uint32_t mode; } get_cam_search_mode; struct { uint32_t temp_warn; uint32_t curr_temp; uint32_t osc_ring_rate; uint32_t core_voltage; } get_temp_threshold; struct { uint32_t ddr_ecc_error_count; uint32_t ocm_ecc_error_count; uint32_t l2_dcache_ecc_error_count; uint32_t l2_icache_ecc_error_count; uint32_t eport_ecc_error_count; } get_ecc_counts; } u; } __packed q80_hw_config_rsp_t; /* * Link Event Request Command */ typedef struct _q80_link_event { uint16_t opcode; uint16_t count_version; uint8_t cmd; #define Q8_LINK_EVENT_CMD_STOP_PERIODIC 0 #define Q8_LINK_EVENT_CMD_ENABLE_ASYNC 1 uint8_t flags; #define Q8_LINK_EVENT_FLAGS_SEND_RSP 1 uint16_t cntxt_id; } __packed q80_link_event_t; typedef struct _q80_link_event_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_link_event_rsp_t; /* * Get Statistics Command */ typedef struct _q80_rcv_stats { uint64_t total_bytes; uint64_t total_pkts; uint64_t lro_pkt_count; uint64_t sw_pkt_count; uint64_t ip_chksum_err; uint64_t pkts_wo_acntxts; uint64_t pkts_dropped_no_sds_card; uint64_t pkts_dropped_no_sds_host; uint64_t oversized_pkts; uint64_t pkts_dropped_no_rds; uint64_t unxpctd_mcast_pkts; uint64_t re1_fbq_error; uint64_t invalid_mac_addr; uint64_t rds_prime_trys; uint64_t rds_prime_success; uint64_t lro_flows_added; uint64_t lro_flows_deleted; uint64_t lro_flows_active; uint64_t pkts_droped_unknown; uint64_t pkts_cnt_oversized; } __packed q80_rcv_stats_t; typedef struct _q80_xmt_stats { uint64_t total_bytes; uint64_t total_pkts; uint64_t errors; 
uint64_t pkts_dropped; uint64_t switch_pkts; uint64_t num_buffers; } __packed q80_xmt_stats_t; typedef struct _q80_mac_stats { uint64_t xmt_frames; uint64_t xmt_bytes; uint64_t xmt_mcast_pkts; uint64_t xmt_bcast_pkts; uint64_t xmt_pause_frames; uint64_t xmt_cntrl_pkts; uint64_t xmt_pkt_lt_64bytes; uint64_t xmt_pkt_lt_127bytes; uint64_t xmt_pkt_lt_255bytes; uint64_t xmt_pkt_lt_511bytes; uint64_t xmt_pkt_lt_1023bytes; uint64_t xmt_pkt_lt_1518bytes; uint64_t xmt_pkt_gt_1518bytes; uint64_t rsrvd0[3]; uint64_t rcv_frames; uint64_t rcv_bytes; uint64_t rcv_mcast_pkts; uint64_t rcv_bcast_pkts; uint64_t rcv_pause_frames; uint64_t rcv_cntrl_pkts; uint64_t rcv_pkt_lt_64bytes; uint64_t rcv_pkt_lt_127bytes; uint64_t rcv_pkt_lt_255bytes; uint64_t rcv_pkt_lt_511bytes; uint64_t rcv_pkt_lt_1023bytes; uint64_t rcv_pkt_lt_1518bytes; uint64_t rcv_pkt_gt_1518bytes; uint64_t rsrvd1[3]; uint64_t rcv_len_error; uint64_t rcv_len_small; uint64_t rcv_len_large; uint64_t rcv_jabber; uint64_t rcv_dropped; uint64_t fcs_error; uint64_t align_error; uint64_t eswitched_frames; uint64_t eswitched_bytes; uint64_t eswitched_mcast_frames; uint64_t eswitched_bcast_frames; uint64_t eswitched_ucast_frames; uint64_t eswitched_err_free_frames; uint64_t eswitched_err_free_bytes; } __packed q80_mac_stats_t; typedef struct _q80_get_stats { uint16_t opcode; uint16_t count_version; uint32_t cmd; #define Q8_GET_STATS_CMD_CLEAR 0x01 #define Q8_GET_STATS_CMD_RCV 0x00 #define Q8_GET_STATS_CMD_XMT 0x02 #define Q8_GET_STATS_CMD_TYPE_CNTXT 0x00 #define Q8_GET_STATS_CMD_TYPE_MAC 0x04 #define Q8_GET_STATS_CMD_TYPE_FUNC 0x08 #define Q8_GET_STATS_CMD_TYPE_VPORT 0x0C #define Q8_GET_STATS_CMD_TYPE_ALL (0x7 << 2) } __packed q80_get_stats_t; typedef struct _q80_get_stats_rsp { uint16_t opcode; uint16_t regcnt_status; uint32_t cmd; union { q80_rcv_stats_t rcv; q80_xmt_stats_t xmt; q80_mac_stats_t mac; } u; } __packed q80_get_stats_rsp_t; typedef struct _q80_get_mac_rcv_xmt_stats_rsp { uint16_t opcode; uint16_t regcnt_status; 
uint32_t cmd; q80_mac_stats_t mac; q80_rcv_stats_t rcv; q80_xmt_stats_t xmt; } __packed q80_get_mac_rcv_xmt_stats_rsp_t; /* * Init NIC Function * Used to Register DCBX Configuration Change AEN */ typedef struct _q80_init_nic_func { uint16_t opcode; uint16_t count_version; uint32_t options; #define Q8_INIT_NIC_REG_IDC_AEN 0x01 #define Q8_INIT_NIC_REG_DCBX_CHNG_AEN 0x02 #define Q8_INIT_NIC_REG_SFP_CHNG_AEN 0x04 } __packed q80_init_nic_func_t; typedef struct _q80_init_nic_func_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_init_nic_func_rsp_t; /* * Stop NIC Function * Used to DeRegister DCBX Configuration Change AEN */ typedef struct _q80_stop_nic_func { uint16_t opcode; uint16_t count_version; uint32_t options; #define Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN 0x02 #define Q8_STOP_NIC_DEREG_SFP_CHNG_AEN 0x04 } __packed q80_stop_nic_func_t; typedef struct _q80_stop_nic_func_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_stop_nic_func_rsp_t; /* * Query Firmware DCBX Capabilities */ typedef struct _q80_query_fw_dcbx_caps { uint16_t opcode; uint16_t count_version; } __packed q80_query_fw_dcbx_caps_t; typedef struct _q80_query_fw_dcbx_caps_rsp { uint16_t opcode; uint16_t regcnt_status; uint32_t dcbx_caps; #define Q8_QUERY_FW_DCBX_CAPS_TSA 0x00000001 #define Q8_QUERY_FW_DCBX_CAPS_ETS 0x00000002 #define Q8_QUERY_FW_DCBX_CAPS_DCBX_CEE_1_01 0x00000004 #define Q8_QUERY_FW_DCBX_CAPS_DCBX_IEEE_1_0 0x00000008 #define Q8_QUERY_FW_DCBX_MAX_TC_MASK 0x00F00000 #define Q8_QUERY_FW_DCBX_MAX_ETS_TC_MASK 0x0F000000 #define Q8_QUERY_FW_DCBX_MAX_PFC_TC_MASK 0xF0000000 } __packed q80_query_fw_dcbx_caps_rsp_t; /* * IDC Ack Cmd */ typedef struct _q80_idc_ack { uint16_t opcode; uint16_t count_version; uint32_t aen_mb1; uint32_t aen_mb2; uint32_t aen_mb3; uint32_t aen_mb4; } __packed q80_idc_ack_t; typedef struct _q80_idc_ack_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_idc_ack_rsp_t; /* * Set Port Configuration command * Used to set Ethernet Standard Pause 
values */ typedef struct _q80_set_port_cfg { uint16_t opcode; uint16_t count_version; uint32_t cfg_bits; #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_MASK (0x7 << 1) #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_NONE (0x0 << 1) #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_HSS (0x2 << 1) #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_PHY (0x3 << 1) #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_EXT (0x4 << 1) #define Q8_VALID_LOOPBACK_MODE(mode) \ (((mode) == Q8_PORT_CFG_BITS_LOOPBACK_MODE_NONE) || \ (((mode) >= Q8_PORT_CFG_BITS_LOOPBACK_MODE_HSS) && \ ((mode) <= Q8_PORT_CFG_BITS_LOOPBACK_MODE_EXT))) #define Q8_PORT_CFG_BITS_DCBX_ENABLE BIT_4 #define Q8_PORT_CFG_BITS_PAUSE_CFG_MASK (0x3 << 5) #define Q8_PORT_CFG_BITS_PAUSE_DISABLED (0x0 << 5) #define Q8_PORT_CFG_BITS_PAUSE_STD (0x1 << 5) #define Q8_PORT_CFG_BITS_PAUSE_PPM (0x2 << 5) #define Q8_PORT_CFG_BITS_LNKCAP_10MB BIT_8 #define Q8_PORT_CFG_BITS_LNKCAP_100MB BIT_9 #define Q8_PORT_CFG_BITS_LNKCAP_1GB BIT_10 #define Q8_PORT_CFG_BITS_LNKCAP_10GB BIT_11 #define Q8_PORT_CFG_BITS_AUTONEG BIT_15 #define Q8_PORT_CFG_BITS_XMT_DISABLE BIT_17 #define Q8_PORT_CFG_BITS_FEC_RQSTD BIT_18 #define Q8_PORT_CFG_BITS_EEE_RQSTD BIT_19 #define Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK (0x3 << 20) #define Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV (0x0 << 20) #define Q8_PORT_CFG_BITS_STDPAUSE_XMT (0x1 << 20) #define Q8_PORT_CFG_BITS_STDPAUSE_RCV (0x2 << 20) } __packed q80_set_port_cfg_t; typedef struct _q80_set_port_cfg_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_set_port_cfg_rsp_t; /* * Get Port Configuration Command */ typedef struct _q80_get_port_cfg { uint16_t opcode; uint16_t count_version; } __packed q80_get_port_cfg_t; typedef struct _q80_get_port_cfg_rsp { uint16_t opcode; uint16_t regcnt_status; uint32_t cfg_bits; /* same as in q80_set_port_cfg_t */ uint8_t phys_port_type; uint8_t rsvd[3]; } __packed q80_get_port_cfg_rsp_t; /* * Get Link Status Command * Used to get current PAUSE values for the port */ typedef struct _q80_get_link_status { uint16_t opcode; 
uint16_t count_version; } __packed q80_get_link_status_t; typedef struct _q80_get_link_status_rsp { uint16_t opcode; uint16_t regcnt_status; uint32_t cfg_bits; #define Q8_GET_LINK_STAT_CFG_BITS_LINK_UP BIT_0 #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_MASK (0x7 << 3) #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_UNKNOWN (0x0 << 3) #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_10MB (0x1 << 3) #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_100MB (0x2 << 3) #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_1GB (0x3 << 3) #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_10GB (0x4 << 3) #define Q8_GET_LINK_STAT_CFG_BITS_PAUSE_CFG_MASK (0x3 << 6) #define Q8_GET_LINK_STAT_CFG_BITS_PAUSE_CFG_DISABLE (0x0 << 6) #define Q8_GET_LINK_STAT_CFG_BITS_PAUSE_CFG_STD (0x1 << 6) #define Q8_GET_LINK_STAT_CFG_BITS_PAUSE_CFG_PPM (0x2 << 6) #define Q8_GET_LINK_STAT_CFG_BITS_LOOPBACK_MASK (0x7 << 8) #define Q8_GET_LINK_STAT_CFG_BITS_LOOPBACK_NONE (0x0 << 6) #define Q8_GET_LINK_STAT_CFG_BITS_LOOPBACK_HSS (0x2 << 6) #define Q8_GET_LINK_STAT_CFG_BITS_LOOPBACK_PHY (0x3 << 6) #define Q8_GET_LINK_STAT_CFG_BITS_FEC_ENABLED BIT_12 #define Q8_GET_LINK_STAT_CFG_BITS_EEE_ENABLED BIT_13 #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_DIR_MASK (0x3 << 20) #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_NONE (0x0 << 20) #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_XMT (0x1 << 20) #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_RCV (0x2 << 20) #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_XMT_RCV (0x3 << 20) uint32_t link_state; #define Q8_GET_LINK_STAT_LOSS_OF_SIGNAL BIT_0 #define Q8_GET_LINK_STAT_PORT_RST_DONE BIT_3 #define Q8_GET_LINK_STAT_PHY_LINK_DOWN BIT_4 #define Q8_GET_LINK_STAT_PCS_LINK_DOWN BIT_5 #define Q8_GET_LINK_STAT_MAC_LOCAL_FAULT BIT_6 #define Q8_GET_LINK_STAT_MAC_REMOTE_FAULT BIT_7 #define Q8_GET_LINK_STAT_XMT_DISABLED BIT_9 #define Q8_GET_LINK_STAT_SFP_XMT_FAULT BIT_10 uint32_t sfp_info; #define Q8_GET_LINK_STAT_SFP_TRNCVR_MASK 0x3 #define Q8_GET_LINK_STAT_SFP_TRNCVR_NOT_EXPECTED 0x0 #define 
Q8_GET_LINK_STAT_SFP_TRNCVR_NONE 0x1 #define Q8_GET_LINK_STAT_SFP_TRNCVR_INVALID 0x2 #define Q8_GET_LINK_STAT_SFP_TRNCVR_VALID 0x3 #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_MASK (0x3 << 2) #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_UNREC_TRSVR (0x0 << 2) #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_NOT_QLOGIC (0x1 << 2) #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_SPEED_FAILED (0x2 << 2) #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_ACCESS_ERROR (0x3 << 2) #define Q8_GET_LINK_STAT_SFP_MOD_TYPE_MASK (0x1F << 4) #define Q8_GET_LINK_STAT_SFP_MOD_NONE (0x00 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_10GBLRM (0x01 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_10GBLR (0x02 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_10GBSR (0x03 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_10GBC_P (0x04 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_10GBC_AL (0x05 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_10GBC_PL (0x06 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_1GBSX (0x07 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_1GBLX (0x08 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_1GBCX (0x09 << 4) #define Q8_GET_LINK_STAT_SFP_MOD_1GBT (0x0A << 4) #define Q8_GET_LINK_STAT_SFP_MOD_1GBC_PL (0x0B << 4) #define Q8_GET_LINK_STAT_SFP_MOD_UNKNOWN (0x0F << 4) #define Q8_GET_LINK_STAT_SFP_MULTI_RATE_MOD BIT_9 #define Q8_GET_LINK_STAT_SFP_XMT_FAULT BIT_10 #define Q8_GET_LINK_STAT_SFP_COPPER_CBL_LENGTH_MASK (0xFF << 16) } __packed q80_get_link_status_rsp_t; /* * Transmit Related Definitions */ /* Max# of TX Rings per Tx Create Cntxt Mbx Cmd*/ #define MAX_TCNTXT_RINGS 8 /* * Transmit Context - Q8_CMD_CREATE_TX_CNTXT Command Configuration Data */ typedef struct _q80_rq_tx_ring { uint64_t paddr; uint64_t tx_consumer; uint16_t nentries; uint16_t intr_id; uint8_t intr_src_bit; uint8_t rsrvd[3]; } __packed q80_rq_tx_ring_t; typedef struct _q80_rq_tx_cntxt { uint16_t opcode; uint16_t count_version; uint32_t cap0; #define Q8_TX_CNTXT_CAP0_BASEFW (1 << 0) #define Q8_TX_CNTXT_CAP0_LSO (1 << 6) #define Q8_TX_CNTXT_CAP0_TC (1 << 25) uint32_t cap1; uint32_t cap2; uint32_t cap3; uint8_t 
ntx_rings; uint8_t traffic_class; /* bits 8-10; others reserved */ uint16_t tx_vpid; q80_rq_tx_ring_t tx_ring[MAX_TCNTXT_RINGS]; } __packed q80_rq_tx_cntxt_t; typedef struct _q80_rsp_tx_ring { uint32_t prod_index; uint16_t cntxt_id; uint8_t state; uint8_t rsrvd; } q80_rsp_tx_ring_t; typedef struct _q80_rsp_tx_cntxt { uint16_t opcode; uint16_t regcnt_status; uint8_t ntx_rings; uint8_t phy_port; uint8_t virt_port; uint8_t rsrvd; q80_rsp_tx_ring_t tx_ring[MAX_TCNTXT_RINGS]; } __packed q80_rsp_tx_cntxt_t; typedef struct _q80_tx_cntxt_destroy { uint16_t opcode; uint16_t count_version; uint32_t cntxt_id; } __packed q80_tx_cntxt_destroy_t; typedef struct _q80_tx_cntxt_destroy_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_tx_cntxt_destroy_rsp_t; /* * Transmit Command Descriptor * These commands are issued on the Transmit Ring associated with a Transmit * context */ typedef struct _q80_tx_cmd { uint8_t tcp_hdr_off; /* TCP Header Offset */ uint8_t ip_hdr_off; /* IP Header Offset */ uint16_t flags_opcode; /* Bits 0-6: flags; 7-12: opcode */ /* flags field */ #define Q8_TX_CMD_FLAGS_MULTICAST 0x01 #define Q8_TX_CMD_FLAGS_LSO_TSO 0x02 #define Q8_TX_CMD_FLAGS_VLAN_TAGGED 0x10 #define Q8_TX_CMD_FLAGS_HW_VLAN_ID 0x40 /* opcode field */ #define Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6 (0xC << 7) #define Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6 (0xB << 7) #define Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6 (0x6 << 7) #define Q8_TX_CMD_OP_XMT_TCP_LSO (0x5 << 7) #define Q8_TX_CMD_OP_XMT_UDP_CHKSUM (0x3 << 7) #define Q8_TX_CMD_OP_XMT_TCP_CHKSUM (0x2 << 7) #define Q8_TX_CMD_OP_XMT_ETHER (0x1 << 7) uint8_t n_bufs; /* # of data segs in data buffer */ uint8_t data_len_lo; /* data length lower 8 bits */ uint16_t data_len_hi; /* data length upper 16 bits */ uint64_t buf2_addr; /* buffer 2 address */ uint16_t rsrvd0; uint16_t mss; /* MSS for this packet */ uint8_t cntxtid; /* Bits 7-4: ContextId; 3-0: reserved */ #define Q8_TX_CMD_PORT_CNXTID(c_id) ((c_id & 0xF) << 4) uint8_t total_hdr_len; /* MAC+IP+TCP 
Header Length for LSO */ uint16_t rsrvd1; uint64_t buf3_addr; /* buffer 3 address */ uint64_t buf1_addr; /* buffer 1 address */ uint16_t buf1_len; /* length of buffer 1 */ uint16_t buf2_len; /* length of buffer 2 */ uint16_t buf3_len; /* length of buffer 3 */ uint16_t buf4_len; /* length of buffer 4 */ uint64_t buf4_addr; /* buffer 4 address */ uint32_t rsrvd2; uint16_t rsrvd3; uint16_t vlan_tci; /* VLAN TCI when hw tagging is enabled*/ } __packed q80_tx_cmd_t; /* 64 bytes */ #define Q8_TX_CMD_MAX_SEGMENTS 4 #define Q8_TX_CMD_TSO_ALIGN 2 #define Q8_TX_MAX_NON_TSO_SEGS 62 /* * Receive Related Definitions */ #define MAX_RDS_RING_SETS 8 /* Max# of Receive Descriptor Rings */ #ifdef QL_ENABLE_ISCSI_TLV #define MAX_SDS_RINGS 32 /* Max# of Status Descriptor Rings */ #define NUM_TX_RINGS (MAX_SDS_RINGS * 2) #else #define MAX_SDS_RINGS 32 /* Max# of Status Descriptor Rings */ #define NUM_TX_RINGS MAX_SDS_RINGS #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ #define MAX_RDS_RINGS MAX_SDS_RINGS /* Max# of Rcv Descriptor Rings */ typedef struct _q80_rq_sds_ring { uint64_t paddr; /* physical addr of status ring in system memory */ uint64_t hdr_split1; uint64_t hdr_split2; uint16_t size; /* number of entries in status ring */ uint16_t hdr_split1_size; uint16_t hdr_split2_size; uint16_t hdr_split_count; uint16_t intr_id; uint8_t intr_src_bit; uint8_t rsrvd[5]; } __packed q80_rq_sds_ring_t; /* 10 32bit words */ typedef struct _q80_rq_rds_ring { uint64_t paddr_std; /* physical addr of rcv ring in system memory */ uint64_t paddr_jumbo; /* physical addr of rcv ring in system memory */ uint16_t std_bsize; uint16_t std_nentries; uint16_t jumbo_bsize; uint16_t jumbo_nentries; } __packed q80_rq_rds_ring_t; /* 6 32bit words */ #define MAX_RCNTXT_SDS_RINGS 8 typedef struct _q80_rq_rcv_cntxt { uint16_t opcode; uint16_t count_version; uint32_t cap0; #define Q8_RCV_CNTXT_CAP0_BASEFW (1 << 0) #define Q8_RCV_CNTXT_CAP0_MULTI_RDS (1 << 1) #define Q8_RCV_CNTXT_CAP0_LRO (1 << 5) #define 
Q8_RCV_CNTXT_CAP0_HW_LRO (1 << 10) #define Q8_RCV_CNTXT_CAP0_VLAN_ALIGN (1 << 14) #define Q8_RCV_CNTXT_CAP0_RSS (1 << 15) #define Q8_RCV_CNTXT_CAP0_MSFT_RSS (1 << 16) #define Q8_RCV_CNTXT_CAP0_SGL_JUMBO (1 << 18) #define Q8_RCV_CNTXT_CAP0_SGL_LRO (1 << 19) #define Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO (1 << 26) uint32_t cap1; uint32_t cap2; uint32_t cap3; uint8_t nrds_sets_rings; uint8_t nsds_rings; uint16_t rds_producer_mode; #define Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE 0 #define Q8_RCV_CNTXT_RDS_PROD_MODE_SHARED 1 uint16_t rcv_vpid; uint16_t rsrvd0; uint32_t rsrvd1; q80_rq_sds_ring_t sds[MAX_RCNTXT_SDS_RINGS]; q80_rq_rds_ring_t rds[MAX_RDS_RING_SETS]; } __packed q80_rq_rcv_cntxt_t; typedef struct _q80_rsp_rds_ring { uint32_t prod_std; uint32_t prod_jumbo; } __packed q80_rsp_rds_ring_t; /* 8 bytes */ typedef struct _q80_rsp_rcv_cntxt { uint16_t opcode; uint16_t regcnt_status; uint8_t nrds_sets_rings; uint8_t nsds_rings; uint16_t cntxt_id; uint8_t state; uint8_t num_funcs; uint8_t phy_port; uint8_t virt_port; uint32_t sds_cons[MAX_RCNTXT_SDS_RINGS]; q80_rsp_rds_ring_t rds[MAX_RDS_RING_SETS]; } __packed q80_rsp_rcv_cntxt_t; typedef struct _q80_rcv_cntxt_destroy { uint16_t opcode; uint16_t count_version; uint32_t cntxt_id; } __packed q80_rcv_cntxt_destroy_t; typedef struct _q80_rcv_cntxt_destroy_rsp { uint16_t opcode; uint16_t regcnt_status; } __packed q80_rcv_cntxt_destroy_rsp_t; /* * Add Receive Rings */ typedef struct _q80_rq_add_rcv_rings { uint16_t opcode; uint16_t count_version; uint8_t nrds_sets_rings; uint8_t nsds_rings; uint16_t cntxt_id; q80_rq_sds_ring_t sds[MAX_RCNTXT_SDS_RINGS]; q80_rq_rds_ring_t rds[MAX_RDS_RING_SETS]; } __packed q80_rq_add_rcv_rings_t; typedef struct _q80_rsp_add_rcv_rings { uint16_t opcode; uint16_t regcnt_status; uint8_t nrds_sets_rings; uint8_t nsds_rings; uint16_t cntxt_id; uint32_t sds_cons[MAX_RCNTXT_SDS_RINGS]; q80_rsp_rds_ring_t rds[MAX_RDS_RING_SETS]; } __packed q80_rsp_add_rcv_rings_t; /* * Map Status Ring to Receive Descriptor Set */ 
#define MAX_SDS_TO_RDS_MAP 16 typedef struct _q80_sds_rds_map_e { uint8_t sds_ring; uint8_t rsrvd0; uint8_t rds_ring; uint8_t rsrvd1; } __packed q80_sds_rds_map_e_t; typedef struct _q80_rq_map_sds_to_rds { uint16_t opcode; uint16_t count_version; uint16_t cntxt_id; uint16_t num_rings; q80_sds_rds_map_e_t sds_rds[MAX_SDS_TO_RDS_MAP]; } __packed q80_rq_map_sds_to_rds_t; typedef struct _q80_rsp_map_sds_to_rds { uint16_t opcode; uint16_t regcnt_status; uint16_t cntxt_id; uint16_t num_rings; q80_sds_rds_map_e_t sds_rds[MAX_SDS_TO_RDS_MAP]; } __packed q80_rsp_map_sds_to_rds_t; /* * Receive Descriptor corresponding to each entry in the receive ring */ typedef struct _q80_rcv_desc { uint16_t handle; uint16_t rsrvd; uint32_t buf_size; /* buffer size in bytes */ uint64_t buf_addr; /* physical address of buffer */ } __packed q80_recv_desc_t; /* * Status Descriptor corresponding to each entry in the Status ring */ typedef struct _q80_stat_desc { uint64_t data[2]; } __packed q80_stat_desc_t; /* * definitions for data[0] field of Status Descriptor */ #define Q8_STAT_DESC_RSS_HASH(data) (data & 0xFFFFFFFF) #define Q8_STAT_DESC_TOTAL_LENGTH(data) ((data >> 32) & 0x3FFF) #define Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(data) ((data >> 32) & 0xFFFF) #define Q8_STAT_DESC_HANDLE(data) ((data >> 48) & 0xFFFF) /* * definitions for data[1] field of Status Descriptor */ #define Q8_STAT_DESC_OPCODE(data) ((data >> 42) & 0xF) #define Q8_STAT_DESC_OPCODE_RCV_PKT 0x01 #define Q8_STAT_DESC_OPCODE_LRO_PKT 0x02 #define Q8_STAT_DESC_OPCODE_SGL_LRO 0x04 #define Q8_STAT_DESC_OPCODE_SGL_RCV 0x05 #define Q8_STAT_DESC_OPCODE_CONT 0x06 /* * definitions for data[1] field of Status Descriptor for standard frames * status descriptor opcode equals 0x04 */ #define Q8_STAT_DESC_STATUS(data) ((data >> 39) & 0x0007) #define Q8_STAT_DESC_STATUS_CHKSUM_NOT_DONE 0x00 #define Q8_STAT_DESC_STATUS_NO_CHKSUM 0x01 #define Q8_STAT_DESC_STATUS_CHKSUM_OK 0x02 #define Q8_STAT_DESC_STATUS_CHKSUM_ERR 0x03 #define 
Q8_STAT_DESC_VLAN(data) ((data >> 47) & 1) #define Q8_STAT_DESC_VLAN_ID(data) ((data >> 48) & 0xFFFF) #define Q8_STAT_DESC_PROTOCOL(data) ((data >> 44) & 0x000F) #define Q8_STAT_DESC_L2_OFFSET(data) ((data >> 48) & 0x001F) #define Q8_STAT_DESC_COUNT(data) ((data >> 37) & 0x0007) /* * definitions for data[0-1] fields of Status Descriptor for LRO * status descriptor opcode equals 0x04 */ /* definitions for data[1] field */ #define Q8_LRO_STAT_DESC_SEQ_NUM(data) (uint32_t)(data) /* * definitions specific to opcode 0x04 data[1] */ #define Q8_STAT_DESC_COUNT_SGL_LRO(data) ((data >> 13) & 0x0007) #define Q8_SGL_LRO_STAT_L2_OFFSET(data) ((data >> 16) & 0xFF) #define Q8_SGL_LRO_STAT_L4_OFFSET(data) ((data >> 24) & 0xFF) #define Q8_SGL_LRO_STAT_TS(data) ((data >> 40) & 0x1) #define Q8_SGL_LRO_STAT_PUSH_BIT(data) ((data >> 41) & 0x1) /* * definitions specific to opcode 0x05 data[1] */ #define Q8_STAT_DESC_COUNT_SGL_RCV(data) ((data >> 37) & 0x0003) /* * definitions for opcode 0x06 */ /* definitions for data[0] field */ #define Q8_SGL_STAT_DESC_HANDLE1(data) (data & 0xFFFF) #define Q8_SGL_STAT_DESC_HANDLE2(data) ((data >> 16) & 0xFFFF) #define Q8_SGL_STAT_DESC_HANDLE3(data) ((data >> 32) & 0xFFFF) #define Q8_SGL_STAT_DESC_HANDLE4(data) ((data >> 48) & 0xFFFF) /* definitions for data[1] field */ #define Q8_SGL_STAT_DESC_HANDLE5(data) (data & 0xFFFF) #define Q8_SGL_STAT_DESC_HANDLE6(data) ((data >> 16) & 0xFFFF) #define Q8_SGL_STAT_DESC_NUM_HANDLES(data) ((data >> 32) & 0x7) #define Q8_SGL_STAT_DESC_HANDLE7(data) ((data >> 48) & 0xFFFF) /** Driver Related Definitions Begin **/ #define TX_SMALL_PKT_SIZE 128 /* size in bytes of small packets */ /* The number of descriptors should be a power of 2 */ #define NUM_TX_DESCRIPTORS 1024 #define NUM_STATUS_DESCRIPTORS 1024 #define NUM_RX_DESCRIPTORS 2048 /* * structure describing various dma buffers */ typedef struct qla_dmabuf { volatile struct { uint32_t tx_ring :1, rds_ring :1, sds_ring :1, minidump :1; } flags; qla_dma_t tx_ring; 
qla_dma_t rds_ring[MAX_RDS_RINGS]; qla_dma_t sds_ring[MAX_SDS_RINGS]; qla_dma_t minidump; } qla_dmabuf_t; typedef struct _qla_sds { q80_stat_desc_t *sds_ring_base; /* start of sds ring */ uint32_t sdsr_next; /* next entry in SDS ring to process */ struct lro_ctrl lro; void *rxb_free; uint32_t rx_free; volatile uint32_t rcv_active; uint32_t sds_consumer; uint64_t intr_count; uint64_t spurious_intr_count; } qla_sds_t; #define Q8_MAX_LRO_CONT_DESC 7 #define Q8_MAX_HANDLES_LRO (1 + (Q8_MAX_LRO_CONT_DESC * 7)) #define Q8_MAX_HANDLES_NON_LRO 8 typedef struct _qla_sgl_rcv { uint16_t pkt_length; uint16_t num_handles; uint16_t chksum_status; uint32_t rss_hash; uint16_t rss_hash_flags; uint16_t vlan_tag; uint16_t handle[Q8_MAX_HANDLES_NON_LRO]; } qla_sgl_rcv_t; typedef struct _qla_sgl_lro { uint16_t flags; #define Q8_LRO_COMP_TS 0x1 #define Q8_LRO_COMP_PUSH_BIT 0x2 uint16_t l2_offset; uint16_t l4_offset; uint16_t payload_length; uint16_t num_handles; uint32_t rss_hash; uint16_t rss_hash_flags; uint16_t vlan_tag; uint16_t handle[Q8_MAX_HANDLES_LRO]; } qla_sgl_lro_t; typedef union { qla_sgl_rcv_t rcv; qla_sgl_lro_t lro; } qla_sgl_comp_t; #define QL_FRAME_HDR_SIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +\ sizeof (struct ip6_hdr) + sizeof (struct tcphdr) + 16) typedef struct _qla_hw_tx_cntxt { q80_tx_cmd_t *tx_ring_base; bus_addr_t tx_ring_paddr; volatile uint32_t *tx_cons; /* tx consumer shadow reg */ bus_addr_t tx_cons_paddr; volatile uint32_t txr_free; /* # of free entries in tx ring */ volatile uint32_t txr_next; /* # next available tx ring entry */ volatile uint32_t txr_comp; /* index of last tx entry completed */ uint32_t tx_prod_reg; uint16_t tx_cntxt_id; } qla_hw_tx_cntxt_t; typedef struct _qla_mcast { uint16_t rsrvd; uint8_t addr[ETHER_ADDR_LEN]; } __packed qla_mcast_t; typedef struct _qla_rdesc { volatile uint32_t prod_std; volatile uint32_t prod_jumbo; volatile uint32_t rx_next; /* next standard rcv ring to arm fw */ volatile int32_t rx_in; /* next standard rcv ring to 
add mbufs */ uint64_t count; uint64_t lro_pkt_count; uint64_t lro_bytes; } qla_rdesc_t; typedef struct _qla_flash_desc_table { uint32_t flash_valid; uint16_t flash_ver; uint16_t flash_len; uint16_t flash_cksum; uint16_t flash_unused; uint8_t flash_model[16]; uint16_t flash_manuf; uint16_t flash_id; uint8_t flash_flag; uint8_t erase_cmd; uint8_t alt_erase_cmd; uint8_t write_enable_cmd; uint8_t write_enable_bits; uint8_t write_statusreg_cmd; uint8_t unprotected_sec_cmd; uint8_t read_manuf_cmd; uint32_t block_size; uint32_t alt_block_size; uint32_t flash_size; uint32_t write_enable_data; uint8_t readid_addr_len; uint8_t write_disable_bits; uint8_t read_dev_id_len; uint8_t chip_erase_cmd; uint16_t read_timeo; uint8_t protected_sec_cmd; uint8_t resvd[65]; } __packed qla_flash_desc_table_t; /* * struct for storing hardware specific information for a given interface */ typedef struct _qla_hw { struct { uint32_t unicast_mac :1, bcast_mac :1, - loopback_mode :2, init_tx_cnxt :1, init_rx_cnxt :1, init_intr_cnxt :1, - fduplex :1, - autoneg :1, fdt_valid :1; } flags; - uint16_t link_speed; - uint16_t cable_length; - uint32_t cable_oui; - uint8_t link_up; - uint8_t module_type; - uint8_t link_faults; + volatile uint16_t link_speed; + volatile uint16_t cable_length; + volatile uint32_t cable_oui; + volatile uint8_t link_up; + volatile uint8_t module_type; + volatile uint8_t link_faults; + volatile uint8_t loopback_mode; + volatile uint8_t fduplex; + volatile uint8_t autoneg; - uint8_t mac_rcv_mode; + volatile uint8_t mac_rcv_mode; - uint32_t max_mtu; + volatile uint32_t max_mtu; uint8_t mac_addr[ETHER_ADDR_LEN]; uint32_t num_sds_rings; uint32_t num_rds_rings; uint32_t num_tx_rings; qla_dmabuf_t dma_buf; /* Transmit Side */ qla_hw_tx_cntxt_t tx_cntxt[NUM_TX_RINGS]; /* Receive Side */ uint16_t rcv_cntxt_id; uint32_t mbx_intr_mask_offset; uint16_t intr_id[MAX_SDS_RINGS]; uint32_t intr_src[MAX_SDS_RINGS]; qla_sds_t sds[MAX_SDS_RINGS]; uint32_t mbox[Q8_NUM_MBOX]; qla_rdesc_t 
rds[MAX_RDS_RINGS]; uint32_t rds_pidx_thres; uint32_t sds_cidx_thres; uint32_t rcv_intr_coalesce; uint32_t xmt_intr_coalesce; /* Immediate Completion */ volatile uint32_t imd_compl; volatile uint32_t aen_mb0; volatile uint32_t aen_mb1; volatile uint32_t aen_mb2; volatile uint32_t aen_mb3; volatile uint32_t aen_mb4; /* multicast address list */ uint32_t nmcast; qla_mcast_t mcast[Q8_MAX_NUM_MULTICAST_ADDRS]; uint8_t mac_addr_arr[(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)]; /* reset sequence */ #define Q8_MAX_RESET_SEQ_IDX 16 uint32_t rst_seq[Q8_MAX_RESET_SEQ_IDX]; uint32_t rst_seq_idx; /* heart beat register value */ uint32_t hbeat_value; uint32_t health_count; uint32_t hbeat_failure; uint32_t max_tx_segs; uint32_t min_lro_pkt_size; uint32_t enable_hw_lro; uint32_t enable_soft_lro; uint32_t enable_9kb; uint32_t user_pri_nic; uint32_t user_pri_iscsi; /* Flash Descriptor Table */ qla_flash_desc_table_t fdt; /* stats */ q80_mac_stats_t mac; q80_rcv_stats_t rcv; q80_xmt_stats_t xmt[NUM_TX_RINGS]; /* Minidump Related */ uint32_t mdump_init; uint32_t mdump_done; uint32_t mdump_active; uint32_t mdump_capture_mask; uint32_t mdump_start_seq_index; void *mdump_buffer; uint32_t mdump_buffer_size; void *mdump_template; uint32_t mdump_template_size; + uint64_t mdump_usec_ts; +#define Q8_MBX_COMP_MSECS (19) + uint64_t mbx_comp_msecs[Q8_MBX_COMP_MSECS]; /* driver state related */ void *drvr_state; + + /* slow path trace */ + uint32_t sp_log_stop_events; +#define Q8_SP_LOG_STOP_HBEAT_FAILURE 0x001 +#define Q8_SP_LOG_STOP_TEMP_FAILURE 0x002 +#define Q8_SP_LOG_STOP_HW_INIT_FAILURE 0x004 +#define Q8_SP_LOG_STOP_IF_START_FAILURE 0x008 +#define Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE 0x010 + + uint32_t sp_log_stop; + uint32_t sp_log_index; + uint32_t sp_log_num_entries; + void *sp_log; } qla_hw_t; #define QL_UPDATE_RDS_PRODUCER_INDEX(ha, prod_reg, val) \ bus_write_4((ha->pci_reg), prod_reg, val); #define QL_UPDATE_TX_PRODUCER_INDEX(ha, val, i) \ WRITE_REG32(ha, ha->hw.tx_cntxt[i].tx_prod_reg, val) 
#define QL_UPDATE_SDS_CONSUMER_INDEX(ha, i, val) \ bus_write_4((ha->pci_reg), (ha->hw.sds[i].sds_consumer), val); #define QL_ENABLE_INTERRUPTS(ha, i) \ bus_write_4((ha->pci_reg), (ha->hw.intr_src[i]), 0); #define QL_BUFFER_ALIGN 16 /* * Flash Configuration */ #define Q8_BOARD_CONFIG_OFFSET 0x370000 #define Q8_BOARD_CONFIG_LENGTH 0x2000 #define Q8_BOARD_CONFIG_MAC0_LO 0x400 #define Q8_FDT_LOCK_MAGIC_ID 0x00FD00FD #define Q8_FDT_FLASH_ADDR_VAL 0xFD009F #define Q8_FDT_FLASH_CTRL_VAL 0x3F #define Q8_FDT_MASK_VAL 0xFF #define Q8_WR_ENABLE_FL_ADDR 0xFD0100 #define Q8_WR_ENABLE_FL_CTRL 0x5 #define Q8_ERASE_LOCK_MAGIC_ID 0x00EF00EF #define Q8_ERASE_FL_ADDR_MASK 0xFD0300 #define Q8_ERASE_FL_CTRL_MASK 0x3D #define Q8_WR_FL_LOCK_MAGIC_ID 0xABCDABCD #define Q8_WR_FL_ADDR_MASK 0x800000 #define Q8_WR_FL_CTRL_MASK 0x3D #define QL_FDT_OFFSET 0x3F0000 #define Q8_FLASH_SECTOR_SIZE 0x10000 /* * Off Chip Memory Access */ typedef struct _q80_offchip_mem_val { uint32_t data_lo; uint32_t data_hi; uint32_t data_ulo; uint32_t data_uhi; } q80_offchip_mem_val_t; #endif /* #ifndef _QL_HW_H_ */ Index: stable/9/sys/dev/qlxgbe/ql_inline.h =================================================================== --- stable/9/sys/dev/qlxgbe/ql_inline.h (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_inline.h (revision 330557) @@ -1,210 +1,215 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: ql_inline.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #ifndef _QL_INLINE_H_ #define _QL_INLINE_H_ #define QL8_SEMLOCK_TIMEOUT 1000/* QLA8020 Semaphore Lock Timeout 10ms */ /* * Inline functions for hardware semaphores */ /* * Name: qla_sem_lock * Function: Locks one of the semaphore registers (semaphore 2,3,5 & 7) * If the id_reg is valid, then id_val is written into it. * This is for debugging purpose * Returns: 0 on success; otherwise its failed. 
*/ static __inline int qla_sem_lock(qla_host_t *ha, uint32_t sem_reg, uint32_t id_reg, uint32_t id_val) { int count = QL8_SEMLOCK_TIMEOUT; while (count) { if ((READ_REG32(ha, sem_reg) & BIT_0)) break; count--; if (!count) return(-1); qla_mdelay(__func__, 10); } if (id_reg) WRITE_REG32(ha, id_reg, id_val); return(0); } /* * Name: qla_sem_unlock * Function: Unlocks the semaphore registers (semaphore 2,3,5 & 7) * previously locked by qla_sem_lock() */ static __inline void qla_sem_unlock(qla_host_t *ha, uint32_t sem_reg) { READ_REG32(ha, sem_reg); } static __inline int qla_get_ifq_snd_maxlen(qla_host_t *ha) { return(((NUM_TX_DESCRIPTORS * 4) - 1)); } static __inline uint32_t qla_get_optics(qla_host_t *ha) { uint32_t link_speed; link_speed = READ_REG32(ha, Q8_LINK_SPEED_0); if (ha->pci_func == 0) link_speed = link_speed & 0xFF; else link_speed = (link_speed >> 8) & 0xFF; switch (link_speed) { case 0x1: link_speed = IFM_100_FX; break; case 0x10: link_speed = IFM_1000_SX; break; default: if ((ha->hw.module_type == 0x4) || (ha->hw.module_type == 0x5) || (ha->hw.module_type == 0x6)) link_speed = (IFM_10G_TWINAX); else link_speed = (IFM_10G_LR | IFM_10G_SR); break; } return(link_speed); } static __inline uint8_t * qla_get_mac_addr(qla_host_t *ha) { return (ha->hw.mac_addr); } static __inline void qla_set_hw_rcv_desc(qla_host_t *ha, uint32_t r_idx, uint32_t index, uint32_t handle, bus_addr_t paddr, uint32_t buf_size) { volatile q80_recv_desc_t *rcv_desc; rcv_desc = (q80_recv_desc_t *)ha->hw.dma_buf.rds_ring[r_idx].dma_b; rcv_desc += index; rcv_desc->handle = (uint16_t)handle; rcv_desc->buf_size = buf_size; rcv_desc->buf_addr = paddr; return; } static __inline void qla_init_hw_rcv_descriptors(qla_host_t *ha) { int i; for (i = 0; i < ha->hw.num_rds_rings; i++) bzero((void *)ha->hw.dma_buf.rds_ring[i].dma_b, (sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS)); } #define QLA_LOCK_DEFAULT_MS_TIMEOUT 3000 #ifndef QLA_LOCK_NO_SLEEP #define QLA_LOCK_NO_SLEEP 0 #endif static __inline int 
qla_lock(qla_host_t *ha, const char *str, uint32_t timeout_ms, uint32_t no_sleep) { int ret = -1; while (1) { mtx_lock(&ha->hw_lock); - if (ha->qla_detach_active) { + if (ha->qla_detach_active || ha->offline) { mtx_unlock(&ha->hw_lock); break; } if (!ha->hw_lock_held) { ha->hw_lock_held = 1; ha->qla_lock = str; ret = 0; mtx_unlock(&ha->hw_lock); break; } mtx_unlock(&ha->hw_lock); if (--timeout_ms == 0) { ha->hw_lock_failed++; break; } else { if (no_sleep) DELAY(1000); else qla_mdelay(__func__, 1); } } - //device_printf(ha->pci_dev, "%s: %s ret = %d\n", __func__, str,ret); +// if (!ha->enable_error_recovery) +// device_printf(ha->pci_dev, "%s: %s ret = %d\n", __func__, +// str,ret); + return (ret); } static __inline void qla_unlock(qla_host_t *ha, const char *str) { mtx_lock(&ha->hw_lock); ha->hw_lock_held = 0; ha->qla_unlock = str; mtx_unlock(&ha->hw_lock); - //device_printf(ha->pci_dev, "%s: %s\n", __func__, str); + +// if (!ha->enable_error_recovery) +// device_printf(ha->pci_dev, "%s: %s\n", __func__, str); return; } #endif /* #ifndef _QL_INLINE_H_ */ Index: stable/9/sys/dev/qlxgbe/ql_ioctl.c =================================================================== --- stable/9/sys/dev/qlxgbe/ql_ioctl.c (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_ioctl.c (revision 330557) @@ -1,525 +1,667 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_ioctl.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_glbl.h" #include "ql_ioctl.h" #include "ql_ver.h" #include "ql_dbg.h" +static int ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log); static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state); static uint32_t ql_drvr_state_size(qla_host_t *ha); static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); static struct cdevsw qla_cdevsw = { .d_version = D_VERSION, .d_ioctl = ql_eioctl, .d_name = "qlcnic", }; int ql_make_cdev(qla_host_t *ha) { ha->ioctl_dev = make_dev(&qla_cdevsw, ha->ifp->if_dunit, UID_ROOT, GID_WHEEL, 0600, "%s", if_name(ha->ifp)); if (ha->ioctl_dev == NULL) return (-1); ha->ioctl_dev->si_drv1 = ha; return (0); } void ql_del_cdev(qla_host_t *ha) { if (ha->ioctl_dev != NULL) destroy_dev(ha->ioctl_dev); return; } static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { qla_host_t *ha; int rval = 0; device_t pci_dev; struct ifnet *ifp; int count; q80_offchip_mem_val_t val; 
qla_rd_pci_ids_t *pci_ids; qla_rd_fw_dump_t *fw_dump; union { qla_reg_val_t *rv; qla_rd_flash_t *rdf; qla_wr_flash_t *wrf; qla_erase_flash_t *erf; qla_offchip_mem_val_t *mem; } u; if ((ha = (qla_host_t *)dev->si_drv1) == NULL) return ENXIO; pci_dev= ha->pci_dev; switch(cmd) { case QLA_RDWR_REG: u.rv = (qla_reg_val_t *)data; if (u.rv->direct) { if (u.rv->rd) { u.rv->val = READ_REG32(ha, u.rv->reg); } else { WRITE_REG32(ha, u.rv->reg, u.rv->val); } } else { if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val, u.rv->rd))) rval = ENXIO; } break; case QLA_RD_FLASH: if (!ha->hw.flags.fdt_valid) { rval = EIO; break; } u.rdf = (qla_rd_flash_t *)data; if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data))) rval = ENXIO; break; case QLA_WR_FLASH: ifp = ha->ifp; if (ifp == NULL) { rval = ENXIO; break; } if (ifp->if_drv_flags & IFF_DRV_RUNNING) { rval = ENXIO; break; } if (!ha->hw.flags.fdt_valid) { rval = EIO; break; } u.wrf = (qla_wr_flash_t *)data; if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size, u.wrf->buffer))) { printf("flash write failed[%d]\n", rval); rval = ENXIO; } break; case QLA_ERASE_FLASH: ifp = ha->ifp; if (ifp == NULL) { rval = ENXIO; break; } if (ifp->if_drv_flags & IFF_DRV_RUNNING) { rval = ENXIO; break; } if (!ha->hw.flags.fdt_valid) { rval = EIO; break; } u.erf = (qla_erase_flash_t *)data; if ((rval = ql_erase_flash(ha, u.erf->off, u.erf->size))) { printf("flash erase failed[%d]\n", rval); rval = ENXIO; } break; case QLA_RDWR_MS_MEM: u.mem = (qla_offchip_mem_val_t *)data; if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val, u.mem->rd))) rval = ENXIO; else { u.mem->data_lo = val.data_lo; u.mem->data_hi = val.data_hi; u.mem->data_ulo = val.data_ulo; u.mem->data_uhi = val.data_uhi; } break; case QLA_RD_FW_DUMP_SIZE: if (ha->hw.mdump_init == 0) { rval = EINVAL; break; } fw_dump = (qla_rd_fw_dump_t *)data; fw_dump->minidump_size = ha->hw.mdump_buffer_size + ha->hw.mdump_template_size; fw_dump->pci_func = ha->pci_func; break; case QLA_RD_FW_DUMP: 
if (ha->hw.mdump_init == 0) { + device_printf(pci_dev, "%s: minidump not initialized\n", __func__); rval = EINVAL; break; } fw_dump = (qla_rd_fw_dump_t *)data; if ((fw_dump->minidump == NULL) || (fw_dump->minidump_size != (ha->hw.mdump_buffer_size + ha->hw.mdump_template_size))) { + device_printf(pci_dev, + "%s: minidump buffer [%p] size = [%d, %d] invalid\n", __func__, + fw_dump->minidump, fw_dump->minidump_size, + (ha->hw.mdump_buffer_size + ha->hw.mdump_template_size)); rval = EINVAL; break; } - if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { - if (!ha->hw.mdump_done) - ha->qla_initiate_recovery = 1; - QLA_UNLOCK(ha, __func__); - } else { + if ((ha->pci_func & 0x1)) { + device_printf(pci_dev, "%s: mindump allowed only on Port0\n", __func__); rval = ENXIO; break; } + + fw_dump->saved = 1; + + if (ha->offline) { + + if (ha->enable_minidump) + ql_minidump(ha); + + fw_dump->saved = 0; + fw_dump->usec_ts = ha->hw.mdump_usec_ts; + + if (!ha->hw.mdump_done) { + device_printf(pci_dev, + "%s: port offline minidump failed\n", __func__); + rval = ENXIO; + break; + } + } else { + + if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { + if (!ha->hw.mdump_done) { + fw_dump->saved = 0; + QL_INITIATE_RECOVERY(ha); + device_printf(pci_dev, "%s: recovery initiated " + " to trigger minidump\n", + __func__); + } + QLA_UNLOCK(ha, __func__); + } else { + device_printf(pci_dev, "%s: QLA_LOCK() failed0\n", __func__); + rval = ENXIO; + break; + } #define QLNX_DUMP_WAIT_SECS 30 - count = QLNX_DUMP_WAIT_SECS * 1000; + count = QLNX_DUMP_WAIT_SECS * 1000; - while (count) { - if (ha->hw.mdump_done) - break; - qla_mdelay(__func__, 100); - count -= 100; - } + while (count) { + if (ha->hw.mdump_done) + break; + qla_mdelay(__func__, 100); + count -= 100; + } - if (!ha->hw.mdump_done) { - rval = ENXIO; - break; - } + if (!ha->hw.mdump_done) { + device_printf(pci_dev, + "%s: port not offline minidump failed\n", __func__); + rval = ENXIO; + break; + } + 
fw_dump->usec_ts = ha->hw.mdump_usec_ts; - if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { - ha->hw.mdump_done = 0; - QLA_UNLOCK(ha, __func__); - } else { - rval = ENXIO; - break; + if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { + ha->hw.mdump_done = 0; + QLA_UNLOCK(ha, __func__); + } else { + device_printf(pci_dev, "%s: QLA_LOCK() failed1\n", __func__); + rval = ENXIO; + break; + } } if ((rval = copyout(ha->hw.mdump_template, fw_dump->minidump, ha->hw.mdump_template_size))) { + device_printf(pci_dev, "%s: template copyout failed\n", __func__); rval = ENXIO; break; } if ((rval = copyout(ha->hw.mdump_buffer, ((uint8_t *)fw_dump->minidump + ha->hw.mdump_template_size), - ha->hw.mdump_buffer_size))) + ha->hw.mdump_buffer_size))) { + device_printf(pci_dev, "%s: minidump copyout failed\n", __func__); rval = ENXIO; + } break; case QLA_RD_DRVR_STATE: rval = ql_drvr_state(ha, (qla_driver_state_t *)data); break; + case QLA_RD_SLOWPATH_LOG: + rval = ql_slowpath_log(ha, (qla_sp_log_t *)data); + break; + case QLA_RD_PCI_IDS: pci_ids = (qla_rd_pci_ids_t *)data; pci_ids->ven_id = pci_get_vendor(pci_dev); pci_ids->dev_id = pci_get_device(pci_dev); pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev); pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev); pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1); break; default: break; } return rval; } + static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state) { int rval = 0; uint32_t drvr_state_size; - qla_drvr_state_hdr_t *hdr; drvr_state_size = ql_drvr_state_size(ha); if (state->buffer == NULL) { state->size = drvr_state_size; return (0); } if (state->size < drvr_state_size) return (ENXIO); if (ha->hw.drvr_state == NULL) return (ENOMEM); - hdr = ha->hw.drvr_state; + ql_capture_drvr_state(ha); - if (!hdr->drvr_version_major) - ql_capture_drvr_state(ha); - rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size); bzero(ha->hw.drvr_state, drvr_state_size); return (rval); } 
static uint32_t ql_drvr_state_size(qla_host_t *ha) { uint32_t drvr_state_size; uint32_t size; size = sizeof (qla_drvr_state_hdr_t); drvr_state_size = QL_ALIGN(size, 64); size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t)); drvr_state_size += QL_ALIGN(size, 64); size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t)); drvr_state_size += QL_ALIGN(size, 64); size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t)); drvr_state_size += QL_ALIGN(size, 64); size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings; drvr_state_size += QL_ALIGN(size, 64); size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS * ha->hw.num_rds_rings; drvr_state_size += QL_ALIGN(size, 64); size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS * ha->hw.num_sds_rings; drvr_state_size += QL_ALIGN(size, 64); return (drvr_state_size); } static void ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state) { int i; for (i = 0; i < ha->hw.num_tx_rings; i++) { tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr; tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr; tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg; tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id; tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free; tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next; tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp; tx_state++; } return; } static void ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state) { int i; for (i = 0; i < ha->hw.num_rds_rings; i++) { rx_state->prod_std = ha->hw.rds[i].prod_std; rx_state->rx_next = ha->hw.rds[i].rx_next; rx_state++; } return; } static void ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state) { int i; for (i = 0; i < ha->hw.num_sds_rings; i++) { sds_state->sdsr_next = ha->hw.sds[i].sdsr_next; sds_state->sds_consumer = ha->hw.sds[i].sds_consumer; sds_state++; } return; } void ql_capture_drvr_state(qla_host_t *ha) { uint8_t *state_buffer; uint8_t *ptr; - uint32_t drvr_state_size; 
qla_drvr_state_hdr_t *hdr; uint32_t size; int i; - drvr_state_size = ql_drvr_state_size(ha); - state_buffer = ha->hw.drvr_state; if (state_buffer == NULL) return; - - bzero(state_buffer, drvr_state_size); hdr = (qla_drvr_state_hdr_t *)state_buffer; + + hdr->saved = 0; + if (hdr->drvr_version_major) { + hdr->saved = 1; + return; + } + + hdr->usec_ts = qla_get_usec_timestamp(); + hdr->drvr_version_major = QLA_VERSION_MAJOR; hdr->drvr_version_minor = QLA_VERSION_MINOR; hdr->drvr_version_build = QLA_VERSION_BUILD; bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN); hdr->link_speed = ha->hw.link_speed; hdr->cable_length = ha->hw.cable_length; hdr->cable_oui = ha->hw.cable_oui; hdr->link_up = ha->hw.link_up; hdr->module_type = ha->hw.module_type; hdr->link_faults = ha->hw.link_faults; hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce; hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce; size = sizeof (qla_drvr_state_hdr_t); hdr->tx_state_offset = QL_ALIGN(size, 64); ptr = state_buffer + hdr->tx_state_offset; ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr); size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t)); hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64); ptr = state_buffer + hdr->rx_state_offset; ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr); size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t)); hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64); ptr = state_buffer + hdr->sds_state_offset; ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr); size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t)); hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64); ptr = state_buffer + hdr->txr_offset; hdr->num_tx_rings = ha->hw.num_tx_rings; hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS; hdr->txr_entries = NUM_TX_DESCRIPTORS; size = hdr->num_tx_rings * hdr->txr_size; bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size); hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64); ptr = state_buffer + hdr->rxr_offset; 
hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS; hdr->rxr_entries = NUM_RX_DESCRIPTORS; hdr->num_rx_rings = ha->hw.num_rds_rings; for (i = 0; i < ha->hw.num_rds_rings; i++) { bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size); ptr += hdr->rxr_size; } size = hdr->rxr_size * hdr->num_rx_rings; hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64); hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS; hdr->sds_entries = NUM_STATUS_DESCRIPTORS; hdr->num_sds_rings = ha->hw.num_sds_rings; ptr = state_buffer + hdr->sds_offset; for (i = 0; i < ha->hw.num_sds_rings; i++) { bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr, hdr->sds_ring_size); ptr += hdr->sds_ring_size; } return; } void ql_alloc_drvr_state_buffer(qla_host_t *ha) { uint32_t drvr_state_size; drvr_state_size = ql_drvr_state_size(ha); ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT); + if (ha->hw.drvr_state != NULL) + bzero(ha->hw.drvr_state, drvr_state_size); + return; } void ql_free_drvr_state_buffer(qla_host_t *ha) { if (ha->hw.drvr_state != NULL) free(ha->hw.drvr_state, M_QLA83XXBUF); return; +} + +void +ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params, + uint32_t param0, uint32_t param1, uint32_t param2, uint32_t param3, + uint32_t param4) +{ + qla_sp_log_entry_t *sp_e, *sp_log; + + if (((sp_log = ha->hw.sp_log) == NULL) || ha->hw.sp_log_stop) + return; + + mtx_lock(&ha->sp_log_lock); + + sp_e = &sp_log[ha->hw.sp_log_index]; + + bzero(sp_e, sizeof (qla_sp_log_entry_t)); + + sp_e->fmtstr_idx = fmtstr_idx; + sp_e->num_params = num_params; + + sp_e->usec_ts = qla_get_usec_timestamp(); + + sp_e->params[0] = param0; + sp_e->params[1] = param1; + sp_e->params[2] = param2; + sp_e->params[3] = param3; + sp_e->params[4] = param4; + + ha->hw.sp_log_index = (ha->hw.sp_log_index + 1) & (NUM_LOG_ENTRIES - 1); + + if (ha->hw.sp_log_num_entries < NUM_LOG_ENTRIES) + ha->hw.sp_log_num_entries++; + + mtx_unlock(&ha->sp_log_lock); + + return; +} + +void 
+ql_alloc_sp_log_buffer(qla_host_t *ha) +{ + uint32_t size; + + size = (sizeof(qla_sp_log_entry_t)) * NUM_LOG_ENTRIES; + + ha->hw.sp_log = malloc(size, M_QLA83XXBUF, M_NOWAIT); + + if (ha->hw.sp_log != NULL) + bzero(ha->hw.sp_log, size); + + ha->hw.sp_log_index = 0; + ha->hw.sp_log_num_entries = 0; + + return; +} + +void +ql_free_sp_log_buffer(qla_host_t *ha) +{ + if (ha->hw.sp_log != NULL) + free(ha->hw.sp_log, M_QLA83XXBUF); + return; +} + +static int +ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log) +{ + int rval = 0; + uint32_t size; + + if ((ha->hw.sp_log == NULL) || (log->buffer == NULL)) + return (EINVAL); + + size = (sizeof(qla_sp_log_entry_t) * NUM_LOG_ENTRIES); + + mtx_lock(&ha->sp_log_lock); + + rval = copyout(ha->hw.sp_log, log->buffer, size); + + if (!rval) { + log->next_idx = ha->hw.sp_log_index; + log->num_entries = ha->hw.sp_log_num_entries; + } + device_printf(ha->pci_dev, + "%s: exit [rval = %d][%p, next_idx = %d, %d entries, %d bytes]\n", + __func__, rval, log->buffer, log->next_idx, log->num_entries, size); + mtx_unlock(&ha->sp_log_lock); + + return (rval); } Index: stable/9/sys/dev/qlxgbe/ql_ioctl.h =================================================================== --- stable/9/sys/dev/qlxgbe/ql_ioctl.h (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_ioctl.h (revision 330557) @@ -1,208 +1,283 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: ql_ioctl.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #ifndef _QL_IOCTL_H_ #define _QL_IOCTL_H_ #include struct qla_reg_val { uint16_t rd; uint16_t direct; uint32_t reg; uint32_t val; }; typedef struct qla_reg_val qla_reg_val_t; struct qla_rd_flash { uint32_t off; uint32_t data; }; typedef struct qla_rd_flash qla_rd_flash_t; struct qla_wr_flash { uint32_t off; uint32_t size; void *buffer; uint32_t pattern; }; typedef struct qla_wr_flash qla_wr_flash_t; struct qla_erase_flash { uint32_t off; uint32_t size; }; typedef struct qla_erase_flash qla_erase_flash_t; struct qla_rd_pci_ids { uint16_t ven_id; uint16_t dev_id; uint16_t subsys_ven_id; uint16_t subsys_dev_id; uint8_t rev_id; }; typedef struct qla_rd_pci_ids qla_rd_pci_ids_t; +#define NUM_LOG_ENTRY_PARAMS 5 +#define NUM_LOG_ENTRIES 512 + +struct qla_sp_log_entry { + uint32_t fmtstr_idx; + uint32_t num_params; + uint64_t usec_ts; + uint32_t params[NUM_LOG_ENTRY_PARAMS]; +}; +typedef struct qla_sp_log_entry qla_sp_log_entry_t; + /* * structure encapsulating the value to read/write from/to offchip (MS) memory */ struct qla_offchip_mem_val { uint16_t rd; uint64_t off; uint32_t data_lo; 
uint32_t data_hi; uint32_t data_ulo; uint32_t data_uhi; }; typedef struct qla_offchip_mem_val qla_offchip_mem_val_t; struct qla_rd_fw_dump { uint16_t pci_func; + uint16_t saved; + uint64_t usec_ts; uint32_t minidump_size; void *minidump; }; typedef struct qla_rd_fw_dump qla_rd_fw_dump_t; struct qla_drvr_state_tx { uint64_t base_p_addr; uint64_t cons_p_addr; uint32_t tx_prod_reg; uint32_t tx_cntxt_id; uint32_t txr_free; uint32_t txr_next; uint32_t txr_comp; }; typedef struct qla_drvr_state_tx qla_drvr_state_tx_t; struct qla_drvr_state_sds { uint32_t sdsr_next; /* next entry in SDS ring to process */ uint32_t sds_consumer; }; typedef struct qla_drvr_state_sds qla_drvr_state_sds_t; struct qla_drvr_state_rx { uint32_t prod_std; uint32_t rx_next; /* next standard rcv ring to arm fw */; }; typedef struct qla_drvr_state_rx qla_drvr_state_rx_t; struct qla_drvr_state_hdr { uint32_t drvr_version_major; uint32_t drvr_version_minor; uint32_t drvr_version_build; uint8_t mac_addr[ETHER_ADDR_LEN]; + uint16_t saved; + uint64_t usec_ts; uint16_t link_speed; uint16_t cable_length; uint32_t cable_oui; uint8_t link_up; uint8_t module_type; uint8_t link_faults; uint32_t rcv_intr_coalesce; uint32_t xmt_intr_coalesce; uint32_t tx_state_offset;/* size = sizeof (qla_drvr_state_tx_t) * num_tx_rings */ uint32_t rx_state_offset;/* size = sizeof (qla_drvr_state_rx_t) * num_rx_rings */ uint32_t sds_state_offset;/* size = sizeof (qla_drvr_state_sds_t) * num_sds_rings */ uint32_t num_tx_rings; /* number of tx rings */ uint32_t txr_size; /* size of each tx ring in bytes */ uint32_t txr_entries; /* number of descriptors in each tx ring */ uint32_t txr_offset; /* start of tx ring [0 - #rings] content */ uint32_t num_rx_rings; /* number of rx rings */ uint32_t rxr_size; /* size of each rx ring in bytes */ uint32_t rxr_entries; /* number of descriptors in each rx ring */ uint32_t rxr_offset; /* start of rx ring [0 - #rings] content */ uint32_t num_sds_rings; /* number of sds rings */ uint32_t 
sds_ring_size; /* size of each sds ring in bytes */ uint32_t sds_entries; /* number of descriptors in each sds ring */ uint32_t sds_offset; /* start of sds ring [0 - #rings] content */ }; typedef struct qla_drvr_state_hdr qla_drvr_state_hdr_t; struct qla_driver_state { uint32_t size; void *buffer; }; typedef struct qla_driver_state qla_driver_state_t; +struct qla_sp_log { + uint32_t next_idx; /* index of next entry in slowpath trace log */ + uint32_t num_entries; /* number of entries in slowpath trace log */ + void *buffer; +}; +typedef struct qla_sp_log qla_sp_log_t; + /* * Read/Write Register */ #define QLA_RDWR_REG _IOWR('q', 1, qla_reg_val_t) /* * Read Flash */ #define QLA_RD_FLASH _IOWR('q', 2, qla_rd_flash_t) /* * Write Flash */ #define QLA_WR_FLASH _IOWR('q', 3, qla_wr_flash_t) /* * Read Offchip (MS) Memory */ #define QLA_RDWR_MS_MEM _IOWR('q', 4, qla_offchip_mem_val_t) /* * Erase Flash */ #define QLA_ERASE_FLASH _IOWR('q', 5, qla_erase_flash_t) /* * Read PCI IDs */ #define QLA_RD_PCI_IDS _IOWR('q', 6, qla_rd_pci_ids_t) /* * Read Minidump Template Size */ #define QLA_RD_FW_DUMP_SIZE _IOWR('q', 7, qla_rd_fw_dump_t) /* * Read Minidump Template */ #define QLA_RD_FW_DUMP _IOWR('q', 8, qla_rd_fw_dump_t) /* * Read Driver State */ #define QLA_RD_DRVR_STATE _IOWR('q', 9, qla_driver_state_t) + +/* + * Read Slowpath Log + */ +#define QLA_RD_SLOWPATH_LOG _IOWR('q', 10, qla_sp_log_t) + +/* + * Format Strings For Slowpath Trace Logs + */ +#define SP_TLOG_FMT_STR_0 \ + "qla_mbx_cmd [%ld]: enter no_pause = %d [0x%08x 0x%08x 0x%08x 0x%08x]\n" + +#define SP_TLOG_FMT_STR_1 \ + "qla_mbx_cmd [%ld]: offline = 0x%08x qla_initiate_recovery = 0x%08x exit1\n" + +#define SP_TLOG_FMT_STR_2 \ + "qla_mbx_cmd [%ld]: qla_initiate_recovery = 0x%08x exit2\n" + +#define SP_TLOG_FMT_STR_3 \ + "qla_mbx_cmd [%ld]: timeout exit3 [host_mbx_cntrl = 0x%08x]\n" + +#define SP_TLOG_FMT_STR_4 \ + "qla_mbx_cmd [%ld]: qla_initiate_recovery = 0x%08x exit4\n" + +#define SP_TLOG_FMT_STR_5 \ + "qla_mbx_cmd 
[%ld]: timeout exit5 [fw_mbx_cntrl = 0x%08x]\n" + +#define SP_TLOG_FMT_STR_6 \ + "qla_mbx_cmd [%ld]: qla_initiate_recovery = 0x%08x exit6\n" + +#define SP_TLOG_FMT_STR_7 \ + "qla_mbx_cmd [%ld]: exit [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n" + +#define SP_TLOG_FMT_STR_8 \ + "qla_ioctl [%ld]: SIOCSIFADDR if_drv_flags = 0x%08x [0x%08x] ipv4 = 0x%08x\n" + +#define SP_TLOG_FMT_STR_9 \ + "qla_ioctl [%ld]: SIOCSIFMTU if_drv_flags = 0x%08x [0x%08x] max_frame_size = 0x%08x if_mtu = 0x%08x\n" + +#define SP_TLOG_FMT_STR_10 \ + "qla_ioctl [%ld]: SIOCSIFFLAGS if_drv_flags = 0x%08x [0x%08x] ha->if_flags = 0x%08x ifp->if_flags = 0x%08x\n" + +#define SP_TLOG_FMT_STR_11 \ + "qla_ioctl [%ld]: SIOCSIFCAP if_drv_flags = 0x%08x [0x%08x] mask = 0x%08x ifp->if_capenable = 0x%08x\n" + +#define SP_TLOG_FMT_STR_12 \ + "qla_set_multi [%ld]: if_drv_flags = 0x%08x [0x%08x] add_multi = 0x%08x mcnt = 0x%08x\n" + +#define SP_TLOG_FMT_STR_13 \ + "qla_stop [%ld]: \n" + +#define SP_TLOG_FMT_STR_14 \ + "qla_init_locked [%ld]: \n" #endif /* #ifndef _QL_IOCTL_H_ */ Index: stable/9/sys/dev/qlxgbe/ql_isr.c =================================================================== --- stable/9/sys/dev/qlxgbe/ql_isr.c (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_isr.c (revision 330557) @@ -1,999 +1,1011 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_isr.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_ver.h" #include "ql_glbl.h" #include "ql_dbg.h" static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx); static void qla_rcv_error(qla_host_t *ha) { ha->stop_rcv = 1; - ha->qla_initiate_recovery = 1; + QL_INITIATE_RECOVERY(ha); } /* * Name: qla_rx_intr * Function: Handles normal ethernet frames received */ static void qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx) { qla_rx_buf_t *rxb; struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL; struct ifnet *ifp = ha->ifp; qla_sds_t *sdsp; struct ether_vlan_header *eh; uint32_t i, rem_len = 0; uint32_t r_idx = 0; qla_rx_ring_t *rx_ring; struct lro_ctrl *lro; lro = &ha->hw.sds[sds_idx].lro; if (ha->hw.num_rds_rings > 1) r_idx = sds_idx; ha->hw.rds[r_idx].count++; sdsp = &ha->hw.sds[sds_idx]; rx_ring = &ha->rx_ring[r_idx]; for (i = 0; i < sgc->num_handles; i++) { rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF]; QL_ASSERT(ha, (rxb != NULL), ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\ 
sds_idx)); if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) { /* log the error */ device_printf(ha->pci_dev, "%s invalid rxb[%d, %d, 0x%04x]\n", __func__, sds_idx, i, sgc->handle[i]); qla_rcv_error(ha); return; } mp = rxb->m_head; if (i == 0) mpf = mp; QL_ASSERT(ha, (mp != NULL), ("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\ sds_idx)); bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD); rxb->m_head = NULL; rxb->next = sdsp->rxb_free; sdsp->rxb_free = rxb; sdsp->rx_free++; if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) { /* log the error */ device_printf(ha->pci_dev, "%s mp == NULL [%d, %d, 0x%04x]\n", __func__, sds_idx, i, sgc->handle[i]); qla_rcv_error(ha); return; } if (i == 0) { mpl = mpf = mp; mp->m_flags |= M_PKTHDR; mp->m_pkthdr.len = sgc->pkt_length; mp->m_pkthdr.rcvif = ifp; rem_len = mp->m_pkthdr.len; } else { mp->m_flags &= ~M_PKTHDR; mpl->m_next = mp; mpl = mp; rem_len = rem_len - mp->m_len; } } mpl->m_len = rem_len; eh = mtod(mpf, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { uint32_t *data = (uint32_t *)eh; mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag); mpf->m_flags |= M_VLANTAG; *(data + 3) = *(data + 2); *(data + 2) = *(data + 1); *(data + 1) = *data; m_adj(mpf, ETHER_VLAN_ENCAP_LEN); } if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) { mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; mpf->m_pkthdr.csum_data = 0xFFFF; } else { mpf->m_pkthdr.csum_flags = 0; } ifp->if_ipackets++; mpf->m_pkthdr.flowid = sgc->rss_hash; mpf->m_flags |= M_FLOWID; #if __FreeBSD_version >= 1100000 M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH); #else #if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000) M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE); #else M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE); #endif #endif /* #if __FreeBSD_version >= 1100000 */ if (ha->hw.enable_soft_lro) { #if (__FreeBSD_version >= 1100101) tcp_lro_queue_mbuf(lro, mpf); #else if 
(tcp_lro_rx(lro, mpf, 0)) (*ifp->if_input)(ifp, mpf); #endif /* #if (__FreeBSD_version >= 1100101) */ } else { (*ifp->if_input)(ifp, mpf); } if (sdsp->rx_free > ha->std_replenish) qla_replenish_normal_rx(ha, sdsp, r_idx); return; } #define QLA_TCP_HDR_SIZE 20 #define QLA_TCP_TS_OPTION_SIZE 12 /* * Name: qla_lro_intr * Function: Handles normal ethernet frames received */ static int qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx) { qla_rx_buf_t *rxb; struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL; struct ifnet *ifp = ha->ifp; qla_sds_t *sdsp; struct ether_vlan_header *eh; uint32_t i, rem_len = 0, pkt_length, iplen; struct tcphdr *th; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; uint16_t etype; uint32_t r_idx = 0; qla_rx_ring_t *rx_ring; if (ha->hw.num_rds_rings > 1) r_idx = sds_idx; ha->hw.rds[r_idx].count++; rx_ring = &ha->rx_ring[r_idx]; ha->hw.rds[r_idx].lro_pkt_count++; sdsp = &ha->hw.sds[sds_idx]; pkt_length = sgc->payload_length + sgc->l4_offset; if (sgc->flags & Q8_LRO_COMP_TS) { pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE; } else { pkt_length += QLA_TCP_HDR_SIZE; } ha->hw.rds[r_idx].lro_bytes += pkt_length; for (i = 0; i < sgc->num_handles; i++) { rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF]; QL_ASSERT(ha, (rxb != NULL), ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\ sds_idx)); if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) { /* log the error */ device_printf(ha->pci_dev, "%s invalid rxb[%d, %d, 0x%04x]\n", __func__, sds_idx, i, sgc->handle[i]); qla_rcv_error(ha); return (0); } mp = rxb->m_head; if (i == 0) mpf = mp; QL_ASSERT(ha, (mp != NULL), ("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\ sds_idx)); bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD); rxb->m_head = NULL; rxb->next = sdsp->rxb_free; sdsp->rxb_free = rxb; sdsp->rx_free++; if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) { /* log the error */ device_printf(ha->pci_dev, "%s mp == NULL [%d, %d, 0x%04x]\n", __func__, 
sds_idx, i, sgc->handle[i]); qla_rcv_error(ha); return (0); } if (i == 0) { mpl = mpf = mp; mp->m_flags |= M_PKTHDR; mp->m_pkthdr.len = pkt_length; mp->m_pkthdr.rcvif = ifp; rem_len = mp->m_pkthdr.len; } else { mp->m_flags &= ~M_PKTHDR; mpl->m_next = mp; mpl = mp; rem_len = rem_len - mp->m_len; } } mpl->m_len = rem_len; th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset); if (sgc->flags & Q8_LRO_COMP_PUSH_BIT) th->th_flags |= TH_PUSH; m_adj(mpf, sgc->l2_offset); eh = mtod(mpf, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { uint32_t *data = (uint32_t *)eh; mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag); mpf->m_flags |= M_VLANTAG; *(data + 3) = *(data + 2); *(data + 2) = *(data + 1); *(data + 1) = *data; m_adj(mpf, ETHER_VLAN_ENCAP_LEN); etype = ntohs(eh->evl_proto); } else { etype = ntohs(eh->evl_encap_proto); } if (etype == ETHERTYPE_IP) { ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN); iplen = (ip->ip_hl << 2) + (th->th_off << 2) + sgc->payload_length; ip->ip_len = htons(iplen); ha->ipv4_lro++; M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4); } else if (etype == ETHERTYPE_IPV6) { ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN); iplen = (th->th_off << 2) + sgc->payload_length; ip6->ip6_plen = htons(iplen); ha->ipv6_lro++; M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6); } else { m_freem(mpf); if (sdsp->rx_free > ha->std_replenish) qla_replenish_normal_rx(ha, sdsp, r_idx); return 0; } mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; mpf->m_pkthdr.csum_data = 0xFFFF; mpf->m_pkthdr.flowid = sgc->rss_hash; mpf->m_flags |= M_FLOWID; ifp->if_ipackets++; (*ifp->if_input)(ifp, mpf); if (sdsp->rx_free > ha->std_replenish) qla_replenish_normal_rx(ha, sdsp, r_idx); return (0); } static int qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx, uint32_t dcount, uint16_t *handle, uint16_t *nhandles) { uint32_t i; uint16_t num_handles; q80_stat_desc_t *sdesc; uint32_t opcode; 
*nhandles = 0; dcount--; for (i = 0; i < dcount; i++) { comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1); sdesc = (q80_stat_desc_t *) &ha->hw.sds[sds_idx].sds_ring_base[comp_idx]; opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1])); - if (!opcode) { + if (!opcode || QL_ERR_INJECT(ha, INJCT_INV_CONT_OPCODE)) { device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n", __func__, (void *)sdesc->data[0], (void *)sdesc->data[1]); return -1; } num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1])); if (!num_handles) { device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n", __func__, (void *)sdesc->data[0], (void *)sdesc->data[1]); return -1; } if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID)) num_handles = -1; switch (num_handles) { case 1: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); break; case 2: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); break; case 3: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); break; case 4: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0])); break; case 5: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1])); break; case 6: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1])); *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1])); break; case 
7: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1])); *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1])); *handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1])); break; default: device_printf(ha->pci_dev, "%s: invalid num handles %p %p\n", __func__, (void *)sdesc->data[0], (void *)sdesc->data[1]); QL_ASSERT(ha, (0),\ ("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n", __func__, "invalid num handles", sds_idx, num_handles, (void *)sdesc->data[0],(void *)sdesc->data[1])); qla_rcv_error(ha); return 0; } *nhandles = *nhandles + num_handles; } return 0; } /* * Name: ql_rcv_isr * Function: Main Interrupt Service Routine */ uint32_t ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count) { device_t dev; qla_hw_t *hw; uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode; volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL; uint32_t ret = 0; qla_sgl_comp_t sgc; uint16_t nhandles; uint32_t sds_replenish_threshold = 0; uint32_t r_idx = 0; qla_sds_t *sdsp; dev = ha->pci_dev; hw = &ha->hw; hw->sds[sds_idx].rcv_active = 1; if (ha->stop_rcv) { hw->sds[sds_idx].rcv_active = 0; return 0; } QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx)); /* * receive interrupts */ comp_idx = hw->sds[sds_idx].sdsr_next; while (count-- && !ha->stop_rcv) { sdesc = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[comp_idx]; opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1])); if (!opcode) break; switch (opcode) { case Q8_STAT_DESC_OPCODE_RCV_PKT: desc_count = 1; bzero(&sgc, sizeof(qla_sgl_comp_t)); sgc.rcv.pkt_length = Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0])); sgc.rcv.num_handles = 1; sgc.rcv.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0])); sgc.rcv.chksum_status = Q8_STAT_DESC_STATUS((sdesc->data[1])); sgc.rcv.rss_hash = Q8_STAT_DESC_RSS_HASH((sdesc->data[0])); 
if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) { sgc.rcv.vlan_tag = Q8_STAT_DESC_VLAN_ID((sdesc->data[1])); } qla_rx_intr(ha, &sgc.rcv, sds_idx); break; case Q8_STAT_DESC_OPCODE_SGL_RCV: desc_count = Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1])); if (desc_count > 1) { c_idx = (comp_idx + desc_count -1) & (NUM_STATUS_DESCRIPTORS-1); sdesc0 = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[c_idx]; - if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) != - Q8_STAT_DESC_OPCODE_CONT) { + if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) != + Q8_STAT_DESC_OPCODE_CONT) || + QL_ERR_INJECT(ha, INJCT_SGL_RCV_INV_DESC_COUNT)) { desc_count = 0; break; } } bzero(&sgc, sizeof(qla_sgl_comp_t)); sgc.rcv.pkt_length = Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\ (sdesc->data[0])); sgc.rcv.chksum_status = Q8_STAT_DESC_STATUS((sdesc->data[1])); sgc.rcv.rss_hash = Q8_STAT_DESC_RSS_HASH((sdesc->data[0])); if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) { sgc.rcv.vlan_tag = Q8_STAT_DESC_VLAN_ID((sdesc->data[1])); } QL_ASSERT(ha, (desc_count <= 2) ,\ ("%s: [sds_idx, data0, data1]="\ "%d, %p, %p]\n", __func__, sds_idx,\ (void *)sdesc->data[0],\ (void *)sdesc->data[1])); sgc.rcv.num_handles = 1; sgc.rcv.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0])); if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count, &sgc.rcv.handle[1], &nhandles)) { device_printf(dev, "%s: [sds_idx, dcount, data0, data1]=" "[%d, %d, 0x%llx, 0x%llx]\n", __func__, sds_idx, desc_count, (long long unsigned int)sdesc->data[0], (long long unsigned int)sdesc->data[1]); desc_count = 0; break; } sgc.rcv.num_handles += nhandles; qla_rx_intr(ha, &sgc.rcv, sds_idx); break; case Q8_STAT_DESC_OPCODE_SGL_LRO: desc_count = Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1])); if (desc_count > 1) { c_idx = (comp_idx + desc_count -1) & (NUM_STATUS_DESCRIPTORS-1); sdesc0 = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[c_idx]; - if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) != - Q8_STAT_DESC_OPCODE_CONT) { + if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) != + 
Q8_STAT_DESC_OPCODE_CONT) || + QL_ERR_INJECT(ha, INJCT_SGL_LRO_INV_DESC_COUNT)) { desc_count = 0; break; } } bzero(&sgc, sizeof(qla_sgl_comp_t)); sgc.lro.payload_length = Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0])); sgc.lro.rss_hash = Q8_STAT_DESC_RSS_HASH((sdesc->data[0])); sgc.lro.num_handles = 1; sgc.lro.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0])); if (Q8_SGL_LRO_STAT_TS((sdesc->data[1]))) sgc.lro.flags |= Q8_LRO_COMP_TS; if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1]))) sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT; sgc.lro.l2_offset = Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1])); sgc.lro.l4_offset = Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1])); if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) { sgc.lro.vlan_tag = Q8_STAT_DESC_VLAN_ID((sdesc->data[1])); } QL_ASSERT(ha, (desc_count <= 7) ,\ ("%s: [sds_idx, data0, data1]="\ "[%d, 0x%llx, 0x%llx]\n",\ __func__, sds_idx,\ (long long unsigned int)sdesc->data[0],\ (long long unsigned int)sdesc->data[1])); if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count, &sgc.lro.handle[1], &nhandles)) { device_printf(dev, "%s: [sds_idx, data0, data1]="\ "[%d, 0x%llx, 0x%llx]\n",\ __func__, sds_idx,\ (long long unsigned int)sdesc->data[0],\ (long long unsigned int)sdesc->data[1]); desc_count = 0; break; } sgc.lro.num_handles += nhandles; if (qla_lro_intr(ha, &sgc.lro, sds_idx)) { device_printf(dev, "%s: [sds_idx, data0, data1]="\ "[%d, 0x%llx, 0x%llx]\n",\ __func__, sds_idx,\ (long long unsigned int)sdesc->data[0],\ (long long unsigned int)sdesc->data[1]); device_printf(dev, "%s: [comp_idx, c_idx, dcount, nhndls]="\ "[%d, %d, %d, %d]\n",\ __func__, comp_idx, c_idx, desc_count, sgc.lro.num_handles); if (desc_count > 1) { device_printf(dev, "%s: [sds_idx, data0, data1]="\ "[%d, 0x%llx, 0x%llx]\n",\ __func__, sds_idx,\ (long long unsigned int)sdesc0->data[0],\ (long long unsigned int)sdesc0->data[1]); } } break; default: device_printf(dev, "%s: default 0x%llx!\n", __func__, (long long unsigned int)sdesc->data[0]); break; } if 
(desc_count == 0) break; sds_replenish_threshold += desc_count; while (desc_count--) { sdesc->data[0] = 0ULL; sdesc->data[1] = 0ULL; comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1); sdesc = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[comp_idx]; } if (sds_replenish_threshold > ha->hw.sds_cidx_thres) { sds_replenish_threshold = 0; if (hw->sds[sds_idx].sdsr_next != comp_idx) { QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\ comp_idx); } hw->sds[sds_idx].sdsr_next = comp_idx; } } if (ha->hw.enable_soft_lro) { struct lro_ctrl *lro; lro = &ha->hw.sds[sds_idx].lro; #if (__FreeBSD_version >= 1100101) tcp_lro_flush_all(lro); #else struct lro_entry *queued; while ((!SLIST_EMPTY(&lro->lro_active))) { queued = SLIST_FIRST(&lro->lro_active); SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif /* #if (__FreeBSD_version >= 1100101) */ } if (ha->stop_rcv) goto ql_rcv_isr_exit; if (hw->sds[sds_idx].sdsr_next != comp_idx) { QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx); hw->sds[sds_idx].sdsr_next = comp_idx; } else { if (ha->hw.num_rds_rings > 1) r_idx = sds_idx; sdsp = &ha->hw.sds[sds_idx]; if (sdsp->rx_free > ha->std_replenish) qla_replenish_normal_rx(ha, sdsp, r_idx); } sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx]; opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1])); if (opcode) ret = -1; ql_rcv_isr_exit: hw->sds[sds_idx].rcv_active = 0; return (ret); } void ql_mbx_isr(void *arg) { qla_host_t *ha; uint32_t data; uint32_t prev_link_state; ha = arg; if (ha == NULL) { device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__); return; } data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); if ((data & 0x3) != 0x1) { WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0); return; } data = READ_REG32(ha, Q8_FW_MBOX0); if ((data & 0xF000) != 0x8000) return; data = data & 0xFFFF; switch (data) { case 0x8001: /* It's an AEN */ ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4)); data = READ_REG32(ha, (Q8_FW_MBOX0 + 8)); ha->hw.cable_length = 
data & 0xFFFF; data = data >> 16; ha->hw.link_speed = data & 0xFFF; data = READ_REG32(ha, (Q8_FW_MBOX0 + 12)); prev_link_state = ha->hw.link_up; - ha->hw.link_up = (((data & 0xFF) == 0) ? 0 : 1); + data = (((data & 0xFF) == 0) ? 0 : 1); + atomic_store_rel_8(&ha->hw.link_up, (uint8_t)data); + + device_printf(ha->pci_dev, + "%s: AEN[0x8001] data = 0x%08x, prev_link_state = 0x%08x\n", + __func__, data, prev_link_state); + if (prev_link_state != ha->hw.link_up) { if (ha->hw.link_up) if_link_state_change(ha->ifp, LINK_STATE_UP); else if_link_state_change(ha->ifp, LINK_STATE_DOWN); } ha->hw.module_type = ((data >> 8) & 0xFF); - ha->hw.flags.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1); - ha->hw.flags.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1); + ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1); + ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1); data = READ_REG32(ha, (Q8_FW_MBOX0 + 16)); - ha->hw.flags.loopback_mode = data & 0x03; + ha->hw.loopback_mode = data & 0x03; ha->hw.link_faults = (data >> 3) & 0xFF; break; case 0x8100: + device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data); ha->hw.imd_compl=1; break; case 0x8101: ha->async_event = 1; ha->hw.aen_mb0 = 0x8101; ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4)); ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8)); ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12)); ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16)); + device_printf(ha->pci_dev, "%s: AEN[0x%08x 0x%08x 0x%08x 0%08x 0x%08x]\n", + __func__, data, ha->hw.aen_mb1, ha->hw.aen_mb2, + ha->hw.aen_mb3, ha->hw.aen_mb4); break; case 0x8110: /* for now just dump the registers */ { uint32_t ombx[5]; ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4)); ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8)); ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12)); ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16)); ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20)); device_printf(ha->pci_dev, "%s: " "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", __func__, data, ombx[0], ombx[1], 
ombx[2], ombx[3], ombx[4]); } break; case 0x8130: /* sfp insertion aen */ device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n", __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4))); break; case 0x8131: /* sfp removal aen */ device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__); break; case 0x8140: { uint32_t ombx[3]; ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4)); ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8)); ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12)); device_printf(ha->pci_dev, "%s: " "0x%08x 0x%08x 0x%08x 0x%08x \n", __func__, data, ombx[0], ombx[1], ombx[2]); } break; default: device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data); break; } WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); return; } static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx) { qla_rx_buf_t *rxb; int count = sdsp->rx_free; uint32_t rx_next; qla_rdesc_t *rdesc; /* we can play with this value via a sysctl */ uint32_t replenish_thresh = ha->hw.rds_pidx_thres; rdesc = &ha->hw.rds[r_idx]; rx_next = rdesc->rx_next; while (count--) { rxb = sdsp->rxb_free; if (rxb == NULL) break; sdsp->rxb_free = rxb->next; sdsp->rx_free--; if (ql_get_mbuf(ha, rxb, NULL) == 0) { qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); rdesc->rx_in++; if (rdesc->rx_in == NUM_RX_DESCRIPTORS) rdesc->rx_in = 0; rdesc->rx_next++; if (rdesc->rx_next == NUM_RX_DESCRIPTORS) rdesc->rx_next = 0; } else { device_printf(ha->pci_dev, "%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n", __func__, r_idx, rdesc->rx_in, rxb->handle); rxb->m_head = NULL; rxb->next = sdsp->rxb_free; sdsp->rxb_free = rxb; sdsp->rx_free++; break; } if (replenish_thresh-- == 0) { QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std, rdesc->rx_next); rx_next = rdesc->rx_next; replenish_thresh = ha->hw.rds_pidx_thres; } } if (rx_next != rdesc->rx_next) { QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std, rdesc->rx_next); } } void ql_isr(void 
*arg) { qla_ivec_t *ivec = arg; qla_host_t *ha ; int idx; qla_hw_t *hw; struct ifnet *ifp; qla_tx_fp_t *fp; ha = ivec->ha; hw = &ha->hw; ifp = ha->ifp; if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings) return; fp = &ha->tx_fp[idx]; hw->sds[idx].intr_count++; if ((fp->fp_taskqueue != NULL) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); return; } Index: stable/9/sys/dev/qlxgbe/ql_misc.c =================================================================== --- stable/9/sys/dev/qlxgbe/ql_misc.c (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_misc.c (revision 330557) @@ -1,1414 +1,1423 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * File : ql_misc.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_glbl.h" #include "ql_dbg.h" #include "ql_tmplt.h" #define QL_FDT_OFFSET 0x3F0000 #define Q8_FLASH_SECTOR_SIZE 0x10000 static int qla_ld_fw_init(qla_host_t *ha); /* * structure encapsulating the value to read/write to offchip memory */ typedef struct _offchip_mem_val { uint32_t data_lo; uint32_t data_hi; uint32_t data_ulo; uint32_t data_uhi; } offchip_mem_val_t; /* * Name: ql_rdwr_indreg32 * Function: Read/Write an Indirect Register */ int ql_rdwr_indreg32(qla_host_t *ha, uint32_t addr, uint32_t *val, uint32_t rd) { uint32_t wnd_reg; uint32_t count = 100; wnd_reg = (Q8_CRB_WINDOW_PF0 | (ha->pci_func << 2)); WRITE_REG32(ha, wnd_reg, addr); while (count--) { if (READ_REG32(ha, wnd_reg) == addr) break; qla_mdelay(__func__, 1); } if (!count || QL_ERR_INJECT(ha, INJCT_RDWR_INDREG_FAILURE)) { device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x, %d] failed\n", __func__, addr, *val, rd); - ha->qla_initiate_recovery = 1; + QL_INITIATE_RECOVERY(ha); return -1; } if (rd) { *val = READ_REG32(ha, Q8_WILD_CARD); } else { WRITE_REG32(ha, Q8_WILD_CARD, *val); } return 0; } /* * Name: ql_rdwr_offchip_mem * Function: Read/Write OffChip Memory */ int ql_rdwr_offchip_mem(qla_host_t *ha, uint64_t addr, q80_offchip_mem_val_t *val, uint32_t rd) { uint32_t count = 100; uint32_t data, step = 0; if (QL_ERR_INJECT(ha, INJCT_RDWR_OFFCHIPMEM_FAILURE)) goto exit_ql_rdwr_offchip_mem; data = (uint32_t)addr; if (ql_rdwr_indreg32(ha, Q8_MS_ADDR_LO, &data, 0)) { step = 1; goto exit_ql_rdwr_offchip_mem; } data = (uint32_t)(addr >> 32); if (ql_rdwr_indreg32(ha, Q8_MS_ADDR_HI, &data, 0)) { step = 2; goto exit_ql_rdwr_offchip_mem; } data = BIT_1; if (ql_rdwr_indreg32(ha, Q8_MS_CNTRL, &data, 0)) { step = 3; goto exit_ql_rdwr_offchip_mem; } if (!rd) { data = val->data_lo; if 
(ql_rdwr_indreg32(ha, Q8_MS_WR_DATA_0_31, &data, 0)) { step = 4; goto exit_ql_rdwr_offchip_mem; } data = val->data_hi; if (ql_rdwr_indreg32(ha, Q8_MS_WR_DATA_32_63, &data, 0)) { step = 5; goto exit_ql_rdwr_offchip_mem; } data = val->data_ulo; if (ql_rdwr_indreg32(ha, Q8_MS_WR_DATA_64_95, &data, 0)) { step = 6; goto exit_ql_rdwr_offchip_mem; } data = val->data_uhi; if (ql_rdwr_indreg32(ha, Q8_MS_WR_DATA_96_127, &data, 0)) { step = 7; goto exit_ql_rdwr_offchip_mem; } data = (BIT_2|BIT_1|BIT_0); if (ql_rdwr_indreg32(ha, Q8_MS_CNTRL, &data, 0)) { step = 7; goto exit_ql_rdwr_offchip_mem; } } else { data = (BIT_1|BIT_0); if (ql_rdwr_indreg32(ha, Q8_MS_CNTRL, &data, 0)) { step = 8; goto exit_ql_rdwr_offchip_mem; } } while (count--) { if (ql_rdwr_indreg32(ha, Q8_MS_CNTRL, &data, 1)) { step = 9; goto exit_ql_rdwr_offchip_mem; } if (!(data & BIT_3)) { if (rd) { if (ql_rdwr_indreg32(ha, Q8_MS_RD_DATA_0_31, &data, 1)) { step = 10; goto exit_ql_rdwr_offchip_mem; } val->data_lo = data; if (ql_rdwr_indreg32(ha, Q8_MS_RD_DATA_32_63, &data, 1)) { step = 11; goto exit_ql_rdwr_offchip_mem; } val->data_hi = data; if (ql_rdwr_indreg32(ha, Q8_MS_RD_DATA_64_95, &data, 1)) { step = 12; goto exit_ql_rdwr_offchip_mem; } val->data_ulo = data; if (ql_rdwr_indreg32(ha, Q8_MS_RD_DATA_96_127, &data, 1)) { step = 13; goto exit_ql_rdwr_offchip_mem; } val->data_uhi = data; } return 0; } else qla_mdelay(__func__, 1); } exit_ql_rdwr_offchip_mem: device_printf(ha->pci_dev, "%s: [0x%08x 0x%08x : 0x%08x 0x%08x 0x%08x 0x%08x]" " [%d] [%d] failed\n", __func__, (uint32_t)(addr >> 32), (uint32_t)(addr), val->data_lo, val->data_hi, val->data_ulo, val->data_uhi, rd, step); - ha->qla_initiate_recovery = 1; + QL_INITIATE_RECOVERY(ha); return (-1); } /* * Name: ql_rd_flash32 * Function: Read Flash Memory */ int ql_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data) { uint32_t data32; if (qla_sem_lock(ha, Q8_FLASH_LOCK, Q8_FLASH_LOCK_ID, 0xABCDABCD)) { device_printf(ha->pci_dev, "%s: Q8_FLASH_LOCK 
failed\n", __func__); return (-1); } data32 = addr; if (ql_rdwr_indreg32(ha, Q8_FLASH_DIRECT_WINDOW, &data32, 0)) { qla_sem_unlock(ha, Q8_FLASH_UNLOCK); device_printf(ha->pci_dev, "%s: Q8_FLASH_DIRECT_WINDOW[0x%08x] failed\n", __func__, data32); return (-1); } data32 = Q8_FLASH_DIRECT_DATA | (addr & 0xFFFF); if (ql_rdwr_indreg32(ha, data32, data, 1)) { qla_sem_unlock(ha, Q8_FLASH_UNLOCK); device_printf(ha->pci_dev, "%s: data32:data [0x%08x] failed\n", __func__, data32); return (-1); } qla_sem_unlock(ha, Q8_FLASH_UNLOCK); return 0; } static int qla_get_fdt(qla_host_t *ha) { uint32_t data32; int count; qla_hw_t *hw; hw = &ha->hw; for (count = 0; count < sizeof(qla_flash_desc_table_t); count+=4) { if (ql_rd_flash32(ha, QL_FDT_OFFSET + count, (uint32_t *)&hw->fdt + (count >> 2))) { device_printf(ha->pci_dev, "%s: Read QL_FDT_OFFSET + %d failed\n", __func__, count); return (-1); } } if (qla_sem_lock(ha, Q8_FLASH_LOCK, Q8_FLASH_LOCK_ID, Q8_FDT_LOCK_MAGIC_ID)) { device_printf(ha->pci_dev, "%s: Q8_FLASH_LOCK failed\n", __func__); return (-1); } data32 = Q8_FDT_FLASH_ADDR_VAL; if (ql_rdwr_indreg32(ha, Q8_FLASH_ADDRESS, &data32, 0)) { qla_sem_unlock(ha, Q8_FLASH_UNLOCK); device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_ADDRESS failed\n", __func__); return (-1); } data32 = Q8_FDT_FLASH_CTRL_VAL; if (ql_rdwr_indreg32(ha, Q8_FLASH_CONTROL, &data32, 0)) { qla_sem_unlock(ha, Q8_FLASH_UNLOCK); device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_CONTROL failed\n", __func__); return (-1); } count = 0; do { if (count < 1000) { QLA_USEC_DELAY(10); count += 10; } else { qla_mdelay(__func__, 1); count += 1000; } data32 = 0; if (ql_rdwr_indreg32(ha, Q8_FLASH_STATUS, &data32, 1)) { qla_sem_unlock(ha, Q8_FLASH_UNLOCK); device_printf(ha->pci_dev, "%s: Read Q8_FLASH_STATUS failed\n", __func__); return (-1); } data32 &= 0x6; } while ((count < 10000) && (data32 != 0x6)); if (data32 != 0x6) { qla_sem_unlock(ha, Q8_FLASH_UNLOCK); device_printf(ha->pci_dev, "%s: Poll Q8_FLASH_STATUS failed\n", 
__func__); return (-1); } if (ql_rdwr_indreg32(ha, Q8_FLASH_RD_DATA, &data32, 1)) { qla_sem_unlock(ha, Q8_FLASH_UNLOCK); device_printf(ha->pci_dev, "%s: Read Q8_FLASH_RD_DATA failed\n", __func__); return (-1); } qla_sem_unlock(ha, Q8_FLASH_UNLOCK); data32 &= Q8_FDT_MASK_VAL; if (hw->fdt.flash_manuf == data32) return (0); else return (-1); } static int qla_flash_write_enable(qla_host_t *ha, int enable) { uint32_t data32; int count = 0; data32 = Q8_WR_ENABLE_FL_ADDR | ha->hw.fdt.write_statusreg_cmd; if (ql_rdwr_indreg32(ha, Q8_FLASH_ADDRESS, &data32, 0)) { device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_ADDRESS failed\n", __func__); return (-1); } if (enable) data32 = ha->hw.fdt.write_enable_bits; else data32 = ha->hw.fdt.write_disable_bits; if (ql_rdwr_indreg32(ha, Q8_FLASH_WR_DATA, &data32, 0)) { device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_WR_DATA failed\n", __func__); return (-1); } data32 = Q8_WR_ENABLE_FL_CTRL; if (ql_rdwr_indreg32(ha, Q8_FLASH_CONTROL, &data32, 0)) { device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_CONTROL failed\n", __func__); return (-1); } do { if (count < 1000) { QLA_USEC_DELAY(10); count += 10; } else { qla_mdelay(__func__, 1); count += 1000; } data32 = 0; if (ql_rdwr_indreg32(ha, Q8_FLASH_STATUS, &data32, 1)) { device_printf(ha->pci_dev, "%s: Read Q8_FLASH_STATUS failed\n", __func__); return (-1); } data32 &= 0x6; } while ((count < 10000) && (data32 != 0x6)); if (data32 != 0x6) { device_printf(ha->pci_dev, "%s: Poll Q8_FLASH_STATUS failed\n", __func__); return (-1); } return 0; } static int qla_erase_flash_sector(qla_host_t *ha, uint32_t start) { uint32_t data32; int count = 0; do { qla_mdelay(__func__, 1); data32 = 0; if (ql_rdwr_indreg32(ha, Q8_FLASH_STATUS, &data32, 1)) { device_printf(ha->pci_dev, "%s: Read Q8_FLASH_STATUS failed\n", __func__); return (-1); } data32 &= 0x6; } while (((count++) < 1000) && (data32 != 0x6)); if (data32 != 0x6) { device_printf(ha->pci_dev, "%s: Poll Q8_FLASH_STATUS failed\n", __func__); return 
(-1); } data32 = (start >> 16) & 0xFF; if (ql_rdwr_indreg32(ha, Q8_FLASH_WR_DATA, &data32, 0)) { device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_WR_DATA failed\n", __func__); return (-1); } data32 = Q8_ERASE_FL_ADDR_MASK | ha->hw.fdt.erase_cmd; if (ql_rdwr_indreg32(ha, Q8_FLASH_ADDRESS, &data32, 0)) { device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_ADDRESS failed\n", __func__); return (-1); } data32 = Q8_ERASE_FL_CTRL_MASK; if (ql_rdwr_indreg32(ha, Q8_FLASH_CONTROL, &data32, 0)) { device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_CONTROL failed\n", __func__); return (-1); } count = 0; do { qla_mdelay(__func__, 1); data32 = 0; if (ql_rdwr_indreg32(ha, Q8_FLASH_STATUS, &data32, 1)) { device_printf(ha->pci_dev, "%s: Read Q8_FLASH_STATUS failed\n", __func__); return (-1); } data32 &= 0x6; } while (((count++) < 1000) && (data32 != 0x6)); if (data32 != 0x6) { device_printf(ha->pci_dev, "%s: Poll Q8_FLASH_STATUS failed\n", __func__); return (-1); } return 0; } int ql_erase_flash(qla_host_t *ha, uint32_t off, uint32_t size) { int rval = 0; uint32_t start; if (off & (Q8_FLASH_SECTOR_SIZE -1)) return (-1); if (qla_sem_lock(ha, Q8_FLASH_LOCK, Q8_FLASH_LOCK_ID, Q8_ERASE_LOCK_MAGIC_ID)) { device_printf(ha->pci_dev, "%s: Q8_FLASH_LOCK failed\n", __func__); return (-1); } if (qla_flash_write_enable(ha, 1) != 0) { rval = -1; goto ql_erase_flash_exit; } for (start = off; start < (off + size); start = start + Q8_FLASH_SECTOR_SIZE) { if (qla_erase_flash_sector(ha, start)) { rval = -1; break; } } rval = qla_flash_write_enable(ha, 0); ql_erase_flash_exit: qla_sem_unlock(ha, Q8_FLASH_UNLOCK); return (rval); } static int qla_wr_flash32(qla_host_t *ha, uint32_t off, uint32_t *data) { uint32_t data32; int count = 0; data32 = Q8_WR_FL_ADDR_MASK | (off >> 2); if (ql_rdwr_indreg32(ha, Q8_FLASH_ADDRESS, &data32, 0)) { device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_ADDRESS failed\n", __func__); return (-1); } if (ql_rdwr_indreg32(ha, Q8_FLASH_WR_DATA, data, 0)) { 
device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_WR_DATA failed\n", __func__); return (-1); } data32 = Q8_WR_FL_CTRL_MASK; if (ql_rdwr_indreg32(ha, Q8_FLASH_CONTROL, &data32, 0)) { device_printf(ha->pci_dev, "%s: Write to Q8_FLASH_CONTROL failed\n", __func__); return (-1); } do { if (count < 1000) { QLA_USEC_DELAY(10); count += 10; } else { qla_mdelay(__func__, 1); count += 1000; } data32 = 0; if (ql_rdwr_indreg32(ha, Q8_FLASH_STATUS, &data32, 1)) { device_printf(ha->pci_dev, "%s: Read Q8_FLASH_STATUS failed\n", __func__); return (-1); } data32 &= 0x6; } while ((count < 10000) && (data32 != 0x6)); if (data32 != 0x6) { device_printf(ha->pci_dev, "%s: Poll Q8_FLASH_STATUS failed\n", __func__); return (-1); } return 0; } static int qla_flash_write_data(qla_host_t *ha, uint32_t off, uint32_t size, void *data) { int rval = 0; uint32_t start; uint32_t *data32 = data; if (qla_sem_lock(ha, Q8_FLASH_LOCK, Q8_FLASH_LOCK_ID, Q8_WR_FL_LOCK_MAGIC_ID)) { device_printf(ha->pci_dev, "%s: Q8_FLASH_LOCK failed\n", __func__); rval = -1; goto qla_flash_write_data_exit; } if ((qla_flash_write_enable(ha, 1) != 0)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); rval = -1; goto qla_flash_write_data_unlock_exit; } for (start = off; start < (off + size); start = start + 4) { if (*data32 != 0xFFFFFFFF) { if (qla_wr_flash32(ha, start, data32)) { rval = -1; break; } } data32++; } rval = qla_flash_write_enable(ha, 0); qla_flash_write_data_unlock_exit: qla_sem_unlock(ha, Q8_FLASH_UNLOCK); qla_flash_write_data_exit: return (rval); } int ql_wr_flash_buffer(qla_host_t *ha, uint32_t off, uint32_t size, void *buf) { int rval = 0; void *data; if (size == 0) return 0; size = size << 2; if (buf == NULL) return -1; if ((data = malloc(size, M_QLA83XXBUF, M_NOWAIT)) == NULL) { device_printf(ha->pci_dev, "%s: malloc failed \n", __func__); rval = -1; goto ql_wr_flash_buffer_exit; } if ((rval = copyin(buf, data, size))) { device_printf(ha->pci_dev, "%s copyin failed\n", __func__); goto 
ql_wr_flash_buffer_free_exit; } rval = qla_flash_write_data(ha, off, size, data); ql_wr_flash_buffer_free_exit: free(data, M_QLA83XXBUF); ql_wr_flash_buffer_exit: return (rval); } #ifdef QL_LDFLASH_FW /* * Name: qla_load_fw_from_flash * Function: Reads the Bootloader from Flash and Loads into Offchip Memory */ static void qla_load_fw_from_flash(qla_host_t *ha) { uint32_t flash_off = 0x10000; uint64_t mem_off; uint32_t count, mem_size; q80_offchip_mem_val_t val; mem_off = (uint64_t)(READ_REG32(ha, Q8_BOOTLD_ADDR)); mem_size = READ_REG32(ha, Q8_BOOTLD_SIZE); device_printf(ha->pci_dev, "%s: [0x%08x][0x%08x]\n", __func__, (uint32_t)mem_off, mem_size); /* only bootloader needs to be loaded into memory */ for (count = 0; count < mem_size ; ) { ql_rd_flash32(ha, flash_off, &val.data_lo); count = count + 4; flash_off = flash_off + 4; ql_rd_flash32(ha, flash_off, &val.data_hi); count = count + 4; flash_off = flash_off + 4; ql_rd_flash32(ha, flash_off, &val.data_ulo); count = count + 4; flash_off = flash_off + 4; ql_rd_flash32(ha, flash_off, &val.data_uhi); count = count + 4; flash_off = flash_off + 4; ql_rdwr_offchip_mem(ha, mem_off, &val, 0); mem_off = mem_off + 16; } return; } #endif /* #ifdef QL_LDFLASH_FW */ /* * Name: qla_init_from_flash * Function: Performs Initialization which consists of the following sequence * - reset * - CRB Init * - Peg Init * - Read the Bootloader from Flash and Load into Offchip Memory * - Kick start the bootloader which loads the rest of the firmware * and performs the remaining steps in the initialization process. 
*/ static int qla_init_from_flash(qla_host_t *ha) { uint32_t delay = 300; uint32_t data; qla_ld_fw_init(ha); do { data = READ_REG32(ha, Q8_CMDPEG_STATE); QL_DPRINT2(ha, (ha->pci_dev, "%s: func[%d] cmdpegstate 0x%08x\n", __func__, ha->pci_func, data)); if (data == 0xFF01) { QL_DPRINT2(ha, (ha->pci_dev, "%s: func[%d] init complete\n", __func__, ha->pci_func)); return(0); } qla_mdelay(__func__, 100); } while (delay--); return (-1); } /* * Name: ql_init_hw * Function: Initializes P3+ hardware. */ int ql_init_hw(qla_host_t *ha) { device_t dev; int ret = 0; uint32_t val, delay = 300; dev = ha->pci_dev; QL_DPRINT1(ha, (dev, "%s: enter\n", __func__)); if (ha->pci_func & 0x1) { while ((ha->pci_func & 0x1) && delay--) { val = READ_REG32(ha, Q8_CMDPEG_STATE); if (val == 0xFF01) { QL_DPRINT2(ha, (dev, "%s: func = %d init complete\n", __func__, ha->pci_func)); qla_mdelay(__func__, 100); goto qla_init_exit; } qla_mdelay(__func__, 100); } - return (-1); + ret = -1; + goto ql_init_hw_exit; } val = READ_REG32(ha, Q8_CMDPEG_STATE); - if (!cold || (val != 0xFF01)) { + if (!cold || (val != 0xFF01) || ha->qla_initiate_recovery) { ret = qla_init_from_flash(ha); qla_mdelay(__func__, 100); } qla_init_exit: ha->fw_ver_major = READ_REG32(ha, Q8_FW_VER_MAJOR); ha->fw_ver_minor = READ_REG32(ha, Q8_FW_VER_MINOR); ha->fw_ver_sub = READ_REG32(ha, Q8_FW_VER_SUB); if (qla_get_fdt(ha) != 0) { device_printf(dev, "%s: qla_get_fdt failed\n", __func__); } else { ha->hw.flags.fdt_valid = 1; } +ql_init_hw_exit: + + if (ret) { + if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HW_INIT_FAILURE) + ha->hw.sp_log_stop = -1; + } + return (ret); } void ql_read_mac_addr(qla_host_t *ha) { uint8_t *macp; uint32_t mac_lo; uint32_t mac_hi; uint32_t flash_off; flash_off = Q8_BOARD_CONFIG_OFFSET + Q8_BOARD_CONFIG_MAC0_LO + (ha->pci_func << 3); ql_rd_flash32(ha, flash_off, &mac_lo); flash_off += 4; ql_rd_flash32(ha, flash_off, &mac_hi); macp = (uint8_t *)&mac_lo; ha->hw.mac_addr[5] = macp[0]; ha->hw.mac_addr[4] = 
	macp[1];
	ha->hw.mac_addr[3] = macp[2];
	ha->hw.mac_addr[2] = macp[3];

	macp = (uint8_t *)&mac_hi;
	ha->hw.mac_addr[1] = macp[0];
	ha->hw.mac_addr[0] = macp[1];

	//device_printf(ha->pci_dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
	//	__func__, ha->hw.mac_addr[0], ha->hw.mac_addr[1],
	//	ha->hw.mac_addr[2], ha->hw.mac_addr[3],
	//	ha->hw.mac_addr[4], ha->hw.mac_addr[5]);

	return;
}

/*
 * Stop/Start/Initialization Handling
 */

/*
 * qla_tmplt_16bit_checksum: folded one's-complement sum of the template
 * taken as 16 bit words; callers treat a non-zero result as a checksum
 * failure.
 */
static uint16_t
qla_tmplt_16bit_checksum(qla_host_t *ha, uint16_t *buf, uint32_t size)
{
	uint32_t sum = 0;
	uint32_t count = size >> 1; /* size in 16 bit words */

	while (count-- > 0)
		sum += *buf++;

	/* fold carries back into the low 16 bits */
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);

	return (~sum);
}

/*
 * qla_wr_list: executes a "write list" template entry; writes each
 * (addr, value) element through the indirect register interface, with
 * an optional delay (delay_to) after each write.
 * Returns 0 on success, -1 on a register access error.
 */
static int
qla_wr_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr)
{
	q8_wrl_e_t *wr_l;
	int i;

	wr_l = (q8_wrl_e_t *)((uint8_t *)ce_hdr + sizeof (q8_ce_hdr_t));

	for (i = 0; i < ce_hdr->opcount; i++, wr_l++) {
		if (ql_rdwr_indreg32(ha, wr_l->addr, &wr_l->value, 0)) {
			device_printf(ha->pci_dev,
				"%s: [0x%08x 0x%08x] error\n", __func__,
				wr_l->addr, wr_l->value);
			return -1;
		}
		if (ce_hdr->delay_to) {
			DELAY(ce_hdr->delay_to);
		}
	}
	return 0;
}

/*
 * qla_rd_wr_list: executes a "read/write list" template entry; for each
 * element reads rd_addr and writes the value read to wr_addr, with an
 * optional delay (delay_to) after each pair.
 * Returns 0 on success, -1 on a register access error.
 */
static int
qla_rd_wr_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr)
{
	q8_rdwrl_e_t *rd_wr_l;
	uint32_t data;
	int i;

	rd_wr_l = (q8_rdwrl_e_t *)((uint8_t *)ce_hdr + sizeof (q8_ce_hdr_t));

	for (i = 0; i < ce_hdr->opcount; i++, rd_wr_l++) {
		if (ql_rdwr_indreg32(ha, rd_wr_l->rd_addr, &data, 1)) {
			device_printf(ha->pci_dev, "%s: [0x%08x] error\n",
				__func__, rd_wr_l->rd_addr);
			return -1;
		}
		if (ql_rdwr_indreg32(ha, rd_wr_l->wr_addr, &data, 0)) {
			device_printf(ha->pci_dev,
				"%s: [0x%08x 0x%08x] error\n", __func__,
				rd_wr_l->wr_addr, data);
			return -1;
		}
		if (ce_hdr->delay_to) {
			DELAY(ce_hdr->delay_to);
		}
	}
	return 0;
}

/*
 * qla_poll_reg: polls an indirect register until (value & tmask) == tvalue,
 * decrementing ms_to once per 1ms iteration; returns 0 on match, -1 on
 * timeout or register access error.
 */
static int
qla_poll_reg(qla_host_t *ha, uint32_t addr, uint32_t ms_to, uint32_t tmask,
	uint32_t tvalue)
{
	uint32_t data;

	while (ms_to) {
		if (ql_rdwr_indreg32(ha, addr, &data, 1)) {
			device_printf(ha->pci_dev, "%s: [0x%08x] error\n",
				__func__, addr);
			return -1;
		}

		if ((data &
tmask) != tvalue) { ms_to--; } else break; qla_mdelay(__func__, 1); } return ((ms_to ? 0: -1)); } static int qla_poll_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr) { int i; q8_poll_hdr_t *phdr; q8_poll_e_t *pe; uint32_t data; phdr = (q8_poll_hdr_t *)((uint8_t *)ce_hdr + sizeof (q8_ce_hdr_t)); pe = (q8_poll_e_t *)((uint8_t *)phdr + sizeof(q8_poll_hdr_t)); for (i = 0; i < ce_hdr->opcount; i++, pe++) { if (ql_rdwr_indreg32(ha, pe->addr, &data, 1)) { device_printf(ha->pci_dev, "%s: [0x%08x] error\n", __func__, pe->addr); return -1; } if (ce_hdr->delay_to) { if ((data & phdr->tmask) == phdr->tvalue) break; if (qla_poll_reg(ha, pe->addr, ce_hdr->delay_to, phdr->tmask, phdr->tvalue)) { if (ql_rdwr_indreg32(ha, pe->to_addr, &data, 1)) { device_printf(ha->pci_dev, "%s: [0x%08x] error\n", __func__, pe->to_addr); return -1; } if (ql_rdwr_indreg32(ha, pe->addr, &data, 1)) { device_printf(ha->pci_dev, "%s: [0x%08x] error\n", __func__, pe->addr); return -1; } } } } return 0; } static int qla_poll_write_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr) { int i; q8_poll_hdr_t *phdr; q8_poll_wr_e_t *wr_e; phdr = (q8_poll_hdr_t *)((uint8_t *)ce_hdr + sizeof (q8_ce_hdr_t)); wr_e = (q8_poll_wr_e_t *)((uint8_t *)phdr + sizeof(q8_poll_hdr_t)); for (i = 0; i < ce_hdr->opcount; i++, wr_e++) { if (ql_rdwr_indreg32(ha, wr_e->dr_addr, &wr_e->dr_value, 0)) { device_printf(ha->pci_dev, "%s: [0x%08x 0x%08x] error\n", __func__, wr_e->dr_addr, wr_e->dr_value); return -1; } if (ql_rdwr_indreg32(ha, wr_e->ar_addr, &wr_e->ar_value, 0)) { device_printf(ha->pci_dev, "%s: [0x%08x 0x%08x] error\n", __func__, wr_e->ar_addr, wr_e->ar_value); return -1; } if (ce_hdr->delay_to) { if (qla_poll_reg(ha, wr_e->ar_addr, ce_hdr->delay_to, phdr->tmask, phdr->tvalue)) device_printf(ha->pci_dev, "%s: " "[ar_addr, ar_value, delay, tmask," "tvalue] [0x%08x 0x%08x 0x%08x 0x%08x" " 0x%08x]\n", __func__, wr_e->ar_addr, wr_e->ar_value, ce_hdr->delay_to, phdr->tmask, phdr->tvalue); } } return 0; } static int 
qla_poll_read_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr) { int i; q8_poll_hdr_t *phdr; q8_poll_rd_e_t *rd_e; uint32_t value; phdr = (q8_poll_hdr_t *)((uint8_t *)ce_hdr + sizeof (q8_ce_hdr_t)); rd_e = (q8_poll_rd_e_t *)((uint8_t *)phdr + sizeof(q8_poll_hdr_t)); for (i = 0; i < ce_hdr->opcount; i++, rd_e++) { if (ql_rdwr_indreg32(ha, rd_e->ar_addr, &rd_e->ar_value, 0)) { device_printf(ha->pci_dev, "%s: [0x%08x 0x%08x] error\n", __func__, rd_e->ar_addr, rd_e->ar_value); return -1; } if (ce_hdr->delay_to) { if (qla_poll_reg(ha, rd_e->ar_addr, ce_hdr->delay_to, phdr->tmask, phdr->tvalue)) { return (-1); } else { if (ql_rdwr_indreg32(ha, rd_e->dr_addr, &value, 1)) { device_printf(ha->pci_dev, "%s: [0x%08x] error\n", __func__, rd_e->ar_addr); return -1; } ha->hw.rst_seq[ha->hw.rst_seq_idx++] = value; if (ha->hw.rst_seq_idx == Q8_MAX_RESET_SEQ_IDX) ha->hw.rst_seq_idx = 1; } } } return 0; } static int qla_rdmwr(qla_host_t *ha, uint32_t raddr, uint32_t waddr, q8_rdmwr_hdr_t *hdr) { uint32_t value; if (hdr->index_a >= Q8_MAX_RESET_SEQ_IDX) { device_printf(ha->pci_dev, "%s: [0x%08x] error\n", __func__, hdr->index_a); return -1; } if (hdr->index_a) { value = ha->hw.rst_seq[hdr->index_a]; } else { if (ql_rdwr_indreg32(ha, raddr, &value, 1)) { device_printf(ha->pci_dev, "%s: [0x%08x] error\n", __func__, raddr); return -1; } } value &= hdr->and_value; value <<= hdr->shl; value >>= hdr->shr; value |= hdr->or_value; value ^= hdr->xor_value; if (ql_rdwr_indreg32(ha, waddr, &value, 0)) { device_printf(ha->pci_dev, "%s: [0x%08x] error\n", __func__, raddr); return -1; } return 0; } static int qla_read_modify_write_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr) { int i; q8_rdmwr_hdr_t *rdmwr_hdr; q8_rdmwr_e_t *rdmwr_e; rdmwr_hdr = (q8_rdmwr_hdr_t *)((uint8_t *)ce_hdr + sizeof (q8_ce_hdr_t)); rdmwr_e = (q8_rdmwr_e_t *)((uint8_t *)rdmwr_hdr + sizeof(q8_rdmwr_hdr_t)); for (i = 0; i < ce_hdr->opcount; i++, rdmwr_e++) { if (qla_rdmwr(ha, rdmwr_e->rd_addr, rdmwr_e->wr_addr, rdmwr_hdr)) { return -1; 
} if (ce_hdr->delay_to) { DELAY(ce_hdr->delay_to); } } return 0; } static int qla_tmplt_execute(qla_host_t *ha, uint8_t *buf, int start_idx, int *end_idx, uint32_t nentries) { int i, ret = 0, proc_end = 0; q8_ce_hdr_t *ce_hdr; for (i = start_idx; ((i < nentries) && (!proc_end)); i++) { ce_hdr = (q8_ce_hdr_t *)buf; ret = 0; switch (ce_hdr->opcode) { case Q8_CE_OPCODE_NOP: break; case Q8_CE_OPCODE_WRITE_LIST: ret = qla_wr_list(ha, ce_hdr); //printf("qla_wr_list %d\n", ret); break; case Q8_CE_OPCODE_READ_WRITE_LIST: ret = qla_rd_wr_list(ha, ce_hdr); //printf("qla_rd_wr_list %d\n", ret); break; case Q8_CE_OPCODE_POLL_LIST: ret = qla_poll_list(ha, ce_hdr); //printf("qla_poll_list %d\n", ret); break; case Q8_CE_OPCODE_POLL_WRITE_LIST: ret = qla_poll_write_list(ha, ce_hdr); //printf("qla_poll_write_list %d\n", ret); break; case Q8_CE_OPCODE_POLL_RD_LIST: ret = qla_poll_read_list(ha, ce_hdr); //printf("qla_poll_read_list %d\n", ret); break; case Q8_CE_OPCODE_READ_MODIFY_WRITE: ret = qla_read_modify_write_list(ha, ce_hdr); //printf("qla_read_modify_write_list %d\n", ret); break; case Q8_CE_OPCODE_SEQ_PAUSE: if (ce_hdr->delay_to) { qla_mdelay(__func__, ce_hdr->delay_to); } break; case Q8_CE_OPCODE_SEQ_END: proc_end = 1; break; case Q8_CE_OPCODE_TMPLT_END: *end_idx = i; return 0; } if (ret) break; buf += ce_hdr->size; } *end_idx = i; return (ret); } #ifndef QL_LDFLASH_FW static int qla_load_offchip_mem(qla_host_t *ha, uint64_t addr, uint32_t *data32, uint32_t len32) { q80_offchip_mem_val_t val; int ret = 0; while (len32) { if (len32 > 4) { val.data_lo = *data32++; val.data_hi = *data32++; val.data_ulo = *data32++; val.data_uhi = *data32++; len32 -= 4; if (ql_rdwr_offchip_mem(ha, addr, &val, 0)) return -1; addr += (uint64_t)16; } else { break; } } bzero(&val, sizeof(q80_offchip_mem_val_t)); switch (len32) { case 3: val.data_lo = *data32++; val.data_hi = *data32++; val.data_ulo = *data32++; ret = ql_rdwr_offchip_mem(ha, addr, &val, 0); break; case 2: val.data_lo = *data32++; 
val.data_hi = *data32++; ret = ql_rdwr_offchip_mem(ha, addr, &val, 0); break; case 1: val.data_lo = *data32++; ret = ql_rdwr_offchip_mem(ha, addr, &val, 0); break; default: break; } return ret; } static int qla_load_bootldr(qla_host_t *ha) { uint64_t addr; uint32_t *data32; uint32_t len32; int ret; addr = (uint64_t)(READ_REG32(ha, Q8_BOOTLD_ADDR)); data32 = (uint32_t *)ql83xx_bootloader; len32 = ql83xx_bootloader_len >> 2; ret = qla_load_offchip_mem(ha, addr, data32, len32); return (ret); } static int qla_load_fwimage(qla_host_t *ha) { uint64_t addr; uint32_t *data32; uint32_t len32; int ret; addr = (uint64_t)(READ_REG32(ha, Q8_FW_IMAGE_ADDR)); data32 = (uint32_t *)ql83xx_firmware; len32 = ql83xx_firmware_len >> 2; ret = qla_load_offchip_mem(ha, addr, data32, len32); return (ret); } #endif /* #ifndef QL_LDFLASH_FW */ static int qla_ld_fw_init(qla_host_t *ha) { uint8_t *buf; uint32_t index = 0, end_idx; q8_tmplt_hdr_t *hdr; bzero(ha->hw.rst_seq, sizeof (ha->hw.rst_seq)); hdr = (q8_tmplt_hdr_t *)ql83xx_resetseq; + device_printf(ha->pci_dev, "%s: reset sequence\n", __func__); if (qla_tmplt_16bit_checksum(ha, (uint16_t *)ql83xx_resetseq, (uint32_t)hdr->size)) { device_printf(ha->pci_dev, "%s: reset seq checksum failed\n", __func__); return -1; } buf = ql83xx_resetseq + hdr->stop_seq_off; -// device_printf(ha->pci_dev, "%s: stop sequence\n", __func__); + device_printf(ha->pci_dev, "%s: stop sequence\n", __func__); if (qla_tmplt_execute(ha, buf, index , &end_idx, hdr->nentries)) { device_printf(ha->pci_dev, "%s: stop seq failed\n", __func__); return -1; } index = end_idx; buf = ql83xx_resetseq + hdr->init_seq_off; -// device_printf(ha->pci_dev, "%s: init sequence\n", __func__); + device_printf(ha->pci_dev, "%s: init sequence\n", __func__); if (qla_tmplt_execute(ha, buf, index , &end_idx, hdr->nentries)) { device_printf(ha->pci_dev, "%s: init seq failed\n", __func__); return -1; } #ifdef QL_LDFLASH_FW qla_load_fw_from_flash(ha); WRITE_REG32(ha, Q8_FW_IMAGE_VALID, 0); 
#else if (qla_load_bootldr(ha)) return -1; if (qla_load_fwimage(ha)) return -1; WRITE_REG32(ha, Q8_FW_IMAGE_VALID, 0x12345678); #endif /* #ifdef QL_LDFLASH_FW */ index = end_idx; buf = ql83xx_resetseq + hdr->start_seq_off; -// device_printf(ha->pci_dev, "%s: start sequence\n", __func__); + device_printf(ha->pci_dev, "%s: start sequence\n", __func__); if (qla_tmplt_execute(ha, buf, index , &end_idx, hdr->nentries)) { device_printf(ha->pci_dev, "%s: init seq failed\n", __func__); return -1; } return 0; } int ql_stop_sequence(qla_host_t *ha) { uint8_t *buf; uint32_t index = 0, end_idx; q8_tmplt_hdr_t *hdr; bzero(ha->hw.rst_seq, sizeof (ha->hw.rst_seq)); hdr = (q8_tmplt_hdr_t *)ql83xx_resetseq; if (qla_tmplt_16bit_checksum(ha, (uint16_t *)ql83xx_resetseq, (uint32_t)hdr->size)) { device_printf(ha->pci_dev, "%s: reset seq checksum failed\n", __func__); return (-1); } buf = ql83xx_resetseq + hdr->stop_seq_off; device_printf(ha->pci_dev, "%s: stop sequence\n", __func__); if (qla_tmplt_execute(ha, buf, index , &end_idx, hdr->nentries)) { device_printf(ha->pci_dev, "%s: stop seq failed\n", __func__); return (-1); } return end_idx; } int ql_start_sequence(qla_host_t *ha, uint16_t index) { uint8_t *buf; uint32_t end_idx; q8_tmplt_hdr_t *hdr; bzero(ha->hw.rst_seq, sizeof (ha->hw.rst_seq)); hdr = (q8_tmplt_hdr_t *)ql83xx_resetseq; if (qla_tmplt_16bit_checksum(ha, (uint16_t *)ql83xx_resetseq, (uint32_t)hdr->size)) { device_printf(ha->pci_dev, "%s: reset seq checksum failed\n", __func__); return (-1); } buf = ql83xx_resetseq + hdr->init_seq_off; device_printf(ha->pci_dev, "%s: init sequence\n", __func__); if (qla_tmplt_execute(ha, buf, index , &end_idx, hdr->nentries)) { device_printf(ha->pci_dev, "%s: init seq failed\n", __func__); return (-1); } #ifdef QL_LDFLASH_FW qla_load_fw_from_flash(ha); WRITE_REG32(ha, Q8_FW_IMAGE_VALID, 0); #else if (qla_load_bootldr(ha)) return -1; if (qla_load_fwimage(ha)) return -1; WRITE_REG32(ha, Q8_FW_IMAGE_VALID, 0x12345678); #endif /* #ifdef 
QL_LDFLASH_FW */ index = end_idx; buf = ql83xx_resetseq + hdr->start_seq_off; device_printf(ha->pci_dev, "%s: start sequence\n", __func__); if (qla_tmplt_execute(ha, buf, index , &end_idx, hdr->nentries)) { device_printf(ha->pci_dev, "%s: init seq failed\n", __func__); return -1; } return (0); } Index: stable/9/sys/dev/qlxgbe/ql_os.c =================================================================== --- stable/9/sys/dev/qlxgbe/ql_os.c (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_os.c (revision 330557) @@ -1,2192 +1,2318 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_os.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
*/ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_ver.h" #include "ql_glbl.h" #include "ql_dbg.h" #include /* * Some PCI Configuration Space Related Defines */ #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP8030 #define PCI_PRODUCT_QLOGIC_ISP8030 0x8030 #endif #define PCI_QLOGIC_ISP8030 \ ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC) /* * static functions */ static int qla_alloc_parent_dma_tag(qla_host_t *ha); static void qla_free_parent_dma_tag(qla_host_t *ha); static int qla_alloc_xmt_bufs(qla_host_t *ha); static void qla_free_xmt_bufs(qla_host_t *ha); static int qla_alloc_rcv_bufs(qla_host_t *ha); static void qla_free_rcv_bufs(qla_host_t *ha); static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb); static void qla_init_ifnet(device_t dev, qla_host_t *ha); static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS); static void qla_release(qla_host_t *ha); static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void qla_stop(qla_host_t *ha); static void qla_get_peer(qla_host_t *ha); static void qla_error_recovery(void *context, int pending); static void qla_async_event(void *context, int pending); static void qla_stats(void *context, int pending); static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, uint32_t iscsi_pdu); /* * Hooks to the Operating Systems */ static int qla_pci_probe (device_t); static int qla_pci_attach (device_t); static int qla_pci_detach (device_t); static void qla_init(void *arg); static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int qla_media_change(struct ifnet *ifp); static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); static int qla_transmit(struct ifnet *ifp, struct mbuf *mp); static void qla_qflush(struct ifnet *ifp); static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); 
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); static int qla_create_fp_taskqueues(qla_host_t *ha); static void qla_destroy_fp_taskqueues(qla_host_t *ha); static void qla_drain_fp_taskqueues(qla_host_t *ha); static device_method_t qla_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qla_pci_probe), DEVMETHOD(device_attach, qla_pci_attach), DEVMETHOD(device_detach, qla_pci_detach), { 0, 0 } }; static driver_t qla_pci_driver = { "ql", qla_pci_methods, sizeof (qla_host_t), }; static devclass_t qla83xx_devclass; DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0); MODULE_DEPEND(qla83xx, pci, 1, 1, 1); MODULE_DEPEND(qla83xx, ether, 1, 1, 1); MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver"); #define QL_STD_REPLENISH_THRES 0 #define QL_JUMBO_REPLENISH_THRES 32 static char dev_str[64]; static char ver_str[64]; /* * Name: qla_pci_probe * Function: Validate the PCI device to be a QLA80XX device */ static int qla_pci_probe(device_t dev) { switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_ISP8030: snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); device_set_desc(dev, dev_str); break; default: return (ENXIO); } if (bootverbose) printf("%s: %s\n ", __func__, dev_str); return (BUS_PROBE_DEFAULT); } static void qla_add_sysctls(qla_host_t *ha) { device_t dev = ha->pci_dev; SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "version", CTLFLAG_RD, ver_str, 0, "Driver Version"); SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fw_version", CTLFLAG_RD, ha->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW, (void *)ha, 0, qla_sysctl_get_link_status, "I", "Link Status"); ha->dbg_level = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &ha->dbg_level, ha->dbg_level, "Debug Level"); ha->enable_minidump = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_minidump", CTLFLAG_RW, &ha->enable_minidump, ha->enable_minidump, - "Minidump retrival is enabled only when this is set"); + "Minidump retrival prior to error recovery " + "is enabled only when this is set"); + ha->enable_driverstate_dump = 1; + SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW, + &ha->enable_driverstate_dump, ha->enable_driverstate_dump, + "Driver State retrival prior to error recovery " + "is enabled only when this is set"); + + ha->enable_error_recovery = 1; + SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "enable_error_recovery", CTLFLAG_RW, + &ha->enable_error_recovery, ha->enable_error_recovery, + "when set error recovery is enabled on fatal errors " + "otherwise the port is turned offline"); + + ha->ms_delay_after_init = 1000; + SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "ms_delay_after_init", CTLFLAG_RW, + &ha->ms_delay_after_init, ha->ms_delay_after_init, + "millisecond delay after hw_init"); + ha->std_replenish = QL_STD_REPLENISH_THRES; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "std_replenish", CTLFLAG_RW, &ha->std_replenish, ha->std_replenish, "Threshold for Replenishing Standard Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ipv4_lro", 
CTLFLAG_RD, &ha->ipv4_lro, "number of ipv4 lro completions"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ipv6_lro", CTLFLAG_RD, &ha->ipv6_lro, "number of ipv6 lro completions"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "tx_tso_frames", CTLFLAG_RD, &ha->tx_tso_frames, "number of Tx TSO Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "hw_vlan_tx_frames", CTLFLAG_RD, &ha->hw_vlan_tx_frames, "number of Tx VLAN Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "hw_lock_failed", CTLFLAG_RD, &ha->hw_lock_failed, "number of hw_lock failures"); return; } static void qla_watchdog(void *arg) { qla_host_t *ha = arg; qla_hw_t *hw; struct ifnet *ifp; hw = &ha->hw; ifp = ha->ifp; if (ha->qla_watchdog_exit) { ha->qla_watchdog_exited = 1; return; } ha->qla_watchdog_exited = 0; if (!ha->qla_watchdog_pause) { - if (ql_hw_check_health(ha) || ha->qla_initiate_recovery || - (ha->msg_from_peer == QL_PEER_MSG_RESET)) { + if (!ha->offline && + (ql_hw_check_health(ha) || ha->qla_initiate_recovery || + (ha->msg_from_peer == QL_PEER_MSG_RESET))) { - if (!(ha->dbg_level & 0x8000)) { + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + ql_update_link_state(ha); + + if (ha->enable_error_recovery) { ha->qla_watchdog_paused = 1; ha->qla_watchdog_pause = 1; - ha->qla_initiate_recovery = 0; ha->err_inject = 0; device_printf(ha->pci_dev, "%s: taskqueue_enqueue(err_task) \n", __func__); taskqueue_enqueue(ha->err_tq, &ha->err_task); - return; + } else { + if (ifp != NULL) + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + ha->offline = 1; } + return; - } else if (ha->qla_interface_up) { + } else { + if (ha->qla_interface_up) { - ha->watchdog_ticks++; + ha->watchdog_ticks++; - if (ha->watchdog_ticks > 1000) - ha->watchdog_ticks = 0; + if (ha->watchdog_ticks > 1000) + ha->watchdog_ticks 
= 0; - if (!ha->watchdog_ticks && QL_RUNNING(ifp)) { - taskqueue_enqueue(ha->stats_tq, &ha->stats_task); - } + if (!ha->watchdog_ticks && QL_RUNNING(ifp)) { + taskqueue_enqueue(ha->stats_tq, + &ha->stats_task); + } - if (ha->async_event) { - taskqueue_enqueue(ha->async_event_tq, - &ha->async_event_task); - } + if (ha->async_event) { + taskqueue_enqueue(ha->async_event_tq, + &ha->async_event_task); + } -#if 0 - for (i = 0; ((i < ha->hw.num_sds_rings) && - !ha->watchdog_ticks); i++) { - qla_tx_fp_t *fp = &ha->tx_fp[i]; - - if (fp->fp_taskqueue != NULL) - taskqueue_enqueue(fp->fp_taskqueue, - &fp->fp_task); } -#endif ha->qla_watchdog_paused = 0; - } else { - ha->qla_watchdog_paused = 0; } } else { ha->qla_watchdog_paused = 1; } callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); } /* * Name: qla_pci_attach * Function: attaches the device to the operating system */ static int qla_pci_attach(device_t dev) { qla_host_t *ha = NULL; uint32_t rsrc_len; int i; uint32_t num_rcvq = 0; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(ha, 0, sizeof (qla_host_t)); if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) { device_printf(dev, "device is not ISP8030\n"); return (ENXIO); } ha->pci_func = pci_get_function(dev) & 0x1; ha->pci_dev = dev; pci_enable_busmaster(dev); ha->reg_rid = PCIR_BAR(0); ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, RF_ACTIVE); if (ha->pci_reg == NULL) { device_printf(dev, "unable to map any ports\n"); goto qla_pci_attach_err; } rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, ha->reg_rid); mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); + mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF); ha->flags.lock_init = 1; qla_add_sysctls(ha); ha->hw.num_sds_rings = MAX_SDS_RINGS; ha->hw.num_rds_rings = MAX_RDS_RINGS; ha->hw.num_tx_rings = NUM_TX_RINGS; ha->reg_rid1 = PCIR_BAR(2); 
ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid1, RF_ACTIVE); ha->msix_count = pci_msix_count(dev); if (ha->msix_count < 1 ) { device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, ha->msix_count); goto qla_pci_attach_err; } if (ha->msix_count < (ha->hw.num_sds_rings + 1)) { ha->hw.num_sds_rings = ha->msix_count - 1; } QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, ha->pci_reg1)); /* initialize hardware */ if (ql_init_hw(ha)) { device_printf(dev, "%s: ql_init_hw failed\n", __func__); goto qla_pci_attach_err; } device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__, ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, ha->fw_ver_build); snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d", ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, ha->fw_ver_build); if (qla_get_nic_partition(ha, NULL, &num_rcvq)) { device_printf(dev, "%s: qla_get_nic_partition failed\n", __func__); goto qla_pci_attach_err; } device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n", __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, ha->pci_reg1, num_rcvq); if ((ha->msix_count < 64) || (num_rcvq != 32)) { if (ha->hw.num_sds_rings > 15) { ha->hw.num_sds_rings = 15; } } ha->hw.num_rds_rings = ha->hw.num_sds_rings; ha->hw.num_tx_rings = ha->hw.num_sds_rings; #ifdef QL_ENABLE_ISCSI_TLV ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2; #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ ql_hw_add_sysctls(ha); ha->msix_count = ha->hw.num_sds_rings + 1; if (pci_alloc_msix(dev, &ha->msix_count)) { device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, ha->msix_count); ha->msix_count = 0; goto qla_pci_attach_err; } ha->mbx_irq_rid = 1; ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->mbx_irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->mbx_irq 
== NULL) { device_printf(dev, "could not allocate mbx interrupt\n"); goto qla_pci_attach_err; } if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, ql_mbx_isr, ha, &ha->mbx_handle)) { device_printf(dev, "could not setup mbx interrupt\n"); goto qla_pci_attach_err; } for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->irq_vec[i].sds_idx = i; ha->irq_vec[i].ha = ha; ha->irq_vec[i].irq_rid = 2 + i; ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->irq_vec[i].irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->irq_vec[i].irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto qla_pci_attach_err; } if (bus_setup_intr(dev, ha->irq_vec[i].irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, ql_isr, &ha->irq_vec[i], &ha->irq_vec[i].handle)) { device_printf(dev, "could not setup interrupt\n"); goto qla_pci_attach_err; } ha->tx_fp[i].ha = ha; ha->tx_fp[i].txr_idx = i; if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) { device_printf(dev, "%s: could not allocate tx_br[%d]\n", __func__, i); goto qla_pci_attach_err; } } if (qla_create_fp_taskqueues(ha) != 0) goto qla_pci_attach_err; printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus, ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count); ql_read_mac_addr(ha); /* allocate parent dma tag */ if (qla_alloc_parent_dma_tag(ha)) { device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n", __func__); goto qla_pci_attach_err; } /* alloc all dma buffers */ if (ql_alloc_dma(ha)) { device_printf(dev, "%s: ql_alloc_dma failed\n", __func__); goto qla_pci_attach_err; } qla_get_peer(ha); if (ql_minidump_init(ha) != 0) { device_printf(dev, "%s: ql_minidump_init failed\n", __func__); goto qla_pci_attach_err; } ql_alloc_drvr_state_buffer(ha); + ql_alloc_sp_log_buffer(ha); /* create the o.s ethernet interface */ qla_init_ifnet(dev, ha); ha->flags.qla_watchdog_active = 1; ha->qla_watchdog_pause = 0; callout_init(&ha->tx_callout, TRUE); ha->flags.qla_callout_init = 1; /* create ioctl device 
interface */ if (ql_make_cdev(ha)) { device_printf(dev, "%s: ql_make_cdev failed\n", __func__); goto qla_pci_attach_err; } callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha); ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT, taskqueue_thread_enqueue, &ha->err_tq); taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", device_get_nameunit(ha->pci_dev)); TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha); ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT, taskqueue_thread_enqueue, &ha->async_event_tq); taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq", device_get_nameunit(ha->pci_dev)); TASK_INIT(&ha->stats_task, 0, qla_stats, ha); ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT, taskqueue_thread_enqueue, &ha->stats_tq); taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq", device_get_nameunit(ha->pci_dev)); QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__)); return (0); qla_pci_attach_err: qla_release(ha); if (ha->flags.lock_init) { mtx_destroy(&ha->hw_lock); + mtx_destroy(&ha->sp_log_lock); } QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__)); return (ENXIO); } /* * Name: qla_pci_detach * Function: Unhooks the device from the operating system */ static int qla_pci_detach(device_t dev) { qla_host_t *ha = NULL; struct ifnet *ifp; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); ifp = ha->ifp; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; QLA_LOCK(ha, __func__, -1, 0); ha->qla_detach_active = 1; qla_stop(ha); qla_release(ha); QLA_UNLOCK(ha, __func__); if (ha->flags.lock_init) { mtx_destroy(&ha->hw_lock); + mtx_destroy(&ha->sp_log_lock); } QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); return (0); } /* * SYSCTL Related Callbacks */ static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = 
sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; ql_hw_link_status(ha); } return (err); } /* * Name: qla_release * Function: Releases the resources allocated for the device */ static void qla_release(qla_host_t *ha) { device_t dev; int i; dev = ha->pci_dev; if (ha->async_event_tq) { - taskqueue_drain(ha->async_event_tq, &ha->async_event_task); + taskqueue_drain_all(ha->async_event_tq); taskqueue_free(ha->async_event_tq); } if (ha->err_tq) { - taskqueue_drain(ha->err_tq, &ha->err_task); + taskqueue_drain_all(ha->err_tq); taskqueue_free(ha->err_tq); } if (ha->stats_tq) { - taskqueue_drain(ha->stats_tq, &ha->stats_task); + taskqueue_drain_all(ha->stats_tq); taskqueue_free(ha->stats_tq); } ql_del_cdev(ha); if (ha->flags.qla_watchdog_active) { ha->qla_watchdog_exit = 1; while (ha->qla_watchdog_exited == 0) qla_mdelay(__func__, 1); } if (ha->flags.qla_callout_init) callout_stop(&ha->tx_callout); if (ha->ifp != NULL) ether_ifdetach(ha->ifp); ql_free_drvr_state_buffer(ha); + ql_free_sp_log_buffer(ha); ql_free_dma(ha); qla_free_parent_dma_tag(ha); if (ha->mbx_handle) (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle); if (ha->mbx_irq) (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid, ha->mbx_irq); for (i = 0; i < ha->hw.num_sds_rings; i++) { if (ha->irq_vec[i].handle) { (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, ha->irq_vec[i].handle); } if (ha->irq_vec[i].irq) { (void)bus_release_resource(dev, SYS_RES_IRQ, ha->irq_vec[i].irq_rid, ha->irq_vec[i].irq); } qla_free_tx_br(ha, &ha->tx_fp[i]); } qla_destroy_fp_taskqueues(ha); if (ha->msix_count) pci_release_msi(dev); -// if (ha->flags.lock_init) { -// mtx_destroy(&ha->hw_lock); -// } - if (ha->pci_reg) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, ha->pci_reg); if (ha->pci_reg1) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, ha->pci_reg1); return; } /* * DMA Related Functions */ static void 
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { *((bus_addr_t *)arg) = 0; if (error) { printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); return; } *((bus_addr_t *)arg) = segs[0].ds_addr; return; } int ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { int ret = 0; device_t dev; bus_addr_t b_addr; dev = ha->pci_dev; QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); ret = bus_dma_tag_create( ha->parent_tag,/* parent */ dma_buf->alignment, ((bus_size_t)(1ULL << 32)),/* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_buf->size, /* maxsize */ 1, /* nsegments */ dma_buf->size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma_buf->dma_tag); if (ret) { device_printf(dev, "%s: could not create dma tag\n", __func__); goto ql_alloc_dmabuf_exit; } ret = bus_dmamem_alloc(dma_buf->dma_tag, (void **)&dma_buf->dma_b, (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), &dma_buf->dma_map); if (ret) { bus_dma_tag_destroy(dma_buf->dma_tag); device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); goto ql_alloc_dmabuf_exit; } ret = bus_dmamap_load(dma_buf->dma_tag, dma_buf->dma_map, dma_buf->dma_b, dma_buf->size, qla_dmamap_callback, &b_addr, BUS_DMA_NOWAIT); if (ret || !b_addr) { bus_dma_tag_destroy(dma_buf->dma_tag); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); ret = -1; goto ql_alloc_dmabuf_exit; } dma_buf->dma_addr = b_addr; ql_alloc_dmabuf_exit: QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", __func__, ret, (void *)dma_buf->dma_tag, (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, dma_buf->size)); return ret; } void ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); bus_dma_tag_destroy(dma_buf->dma_tag); } static int qla_alloc_parent_dma_tag(qla_host_t *ha) { int 
ret; device_t dev; dev = ha->pci_dev; /* * Allocate parent DMA Tag */ ret = bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ha->parent_tag); if (ret) { device_printf(dev, "%s: could not create parent dma tag\n", __func__); return (-1); } ha->flags.parent_tag = 1; return (0); } static void qla_free_parent_dma_tag(qla_host_t *ha) { if (ha->flags.parent_tag) { bus_dma_tag_destroy(ha->parent_tag); ha->flags.parent_tag = 0; } } /* * Name: qla_init_ifnet * Function: Creates the Network Device Interface and Registers it with the O.S */ static void qla_init_ifnet(device_t dev, qla_host_t *ha) { struct ifnet *ifp; QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); ifp = ha->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); #if __FreeBSD_version >= 1000000 if_initbaudrate(ifp, IF_Gbps(10)); ifp->if_capabilities = IFCAP_LINKSTATE; #else ifp->if_mtu = ETHERMTU; ifp->if_baudrate = (1 * 1000 * 1000 *1000); #endif /* #if __FreeBSD_version >= 1000000 */ ifp->if_init = qla_init; ifp->if_softc = ha; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = qla_ioctl; ifp->if_transmit = qla_transmit; ifp->if_qflush = qla_qflush; IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); IFQ_SET_READY(&ifp->if_snd); ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ether_ifattach(ifp, qla_get_mac_addr(ha)); ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | + IFCAP_TSO6 | IFCAP_JUMBO_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTSO | IFCAP_LRO; 
ifp->if_capenable = ifp->if_capabilities; ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status); ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); return; } static void qla_init_locked(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; + ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0); + qla_stop(ha); if (qla_alloc_xmt_bufs(ha) != 0) return; qla_confirm_9kb_enable(ha); if (qla_alloc_rcv_bufs(ha) != 0) return; bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN); ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; ha->stop_rcv = 0; if (ql_init_hw_if(ha) == 0) { ifp = ha->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; - ha->qla_watchdog_pause = 0; ha->hw_vlan_tx_frames = 0; ha->tx_tso_frames = 0; ha->qla_interface_up = 1; ql_update_link_state(ha); + } else { + if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE) + ha->hw.sp_log_stop = -1; } + ha->qla_watchdog_pause = 0; + return; } static void qla_init(void *arg) { qla_host_t *ha; ha = (qla_host_t *)arg; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; qla_init_locked(ha); QLA_UNLOCK(ha, __func__); QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); } static int qla_set_multi(qla_host_t *ha, uint32_t add_multi) { uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; struct ifmultiaddr *ifma; int mcnt = 0; struct ifnet *ifp = ha->ifp; int ret = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) break; bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); mcnt++; } if_maddr_runlock(ifp); if 
(QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP) != 0) return (-1); + ql_sp_log(ha, 12, 4, ifp->if_drv_flags, + (ifp->if_drv_flags & IFF_DRV_RUNNING), + add_multi, (uint32_t)mcnt, 0); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (!add_multi) { ret = qla_hw_del_all_mcast(ha); if (ret) device_printf(ha->pci_dev, "%s: qla_hw_del_all_mcast() failed\n", __func__); } if (!ret) ret = ql_hw_set_multi(ha, mta, mcnt, 1); } QLA_UNLOCK(ha, __func__); return (ret); } static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { int ret = 0; struct ifreq *ifr = (struct ifreq *)data; struct ifaddr *ifa = (struct ifaddr *)data; qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; + if (ha->offline || ha->qla_initiate_recovery) + return (ret); switch (cmd) { case SIOCSIFADDR: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", __func__, cmd)); if (ifa->ifa_addr->sa_family == AF_INET) { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ifp->if_flags |= IFF_UP; + ql_sp_log(ha, 8, 3, ifp->if_drv_flags, + (ifp->if_drv_flags & IFF_DRV_RUNNING), + ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0); + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { qla_init_locked(ha); } QLA_UNLOCK(ha, __func__); QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr))); arp_ifinit(ifp, ifa); } else { ether_ioctl(ifp, cmd, data); } break; case SIOCSIFMTU: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", __func__, cmd)); if (ifr->ifr_mtu > QLA_MAX_MTU) { ret = EINVAL; } else { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ifp->if_mtu = ifr->ifr_mtu; ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + ql_sp_log(ha, 9, 4, ifp->if_drv_flags, + (ifp->if_drv_flags & IFF_DRV_RUNNING), + ha->max_frame_size, ifp->if_mtu, 0); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { qla_init_locked(ha); } if (ifp->if_mtu > 
ETHERMTU) ha->std_replenish = QL_JUMBO_REPLENISH_THRES; else ha->std_replenish = QL_STD_REPLENISH_THRES; QLA_UNLOCK(ha, __func__); } break; case SIOCSIFFLAGS: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", __func__, cmd)); ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; + ql_sp_log(ha, 10, 4, ifp->if_drv_flags, + (ifp->if_drv_flags & IFF_DRV_RUNNING), + ha->if_flags, ifp->if_flags, 0); + if (ifp->if_flags & IFF_UP) { ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; qla_init_locked(ha); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ ha->if_flags) & IFF_PROMISC) { ret = ql_set_promisc(ha); } else if ((ifp->if_flags ^ ha->if_flags) & IFF_ALLMULTI) { ret = ql_set_allmulti(ha); } } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) qla_stop(ha); ha->if_flags = ifp->if_flags; } QLA_UNLOCK(ha, __func__); break; case SIOCADDMULTI: QL_DPRINT4(ha, (ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); if (qla_set_multi(ha, 1)) ret = EINVAL; break; case SIOCDELMULTI: QL_DPRINT4(ha, (ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); if (qla_set_multi(ha, 0)) ret = EINVAL; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", __func__, cmd)); ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", __func__, cmd)); if (mask & IFCAP_HWCSUM) ifp->if_capenable ^= IFCAP_HWCSUM; if (mask & IFCAP_TSO4) ifp->if_capenable ^= IFCAP_TSO4; if (mask & IFCAP_TSO6) ifp->if_capenable ^= IFCAP_TSO6; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 
QLA_LOCK_NO_SLEEP); if (ret) break; + ql_sp_log(ha, 11, 4, ifp->if_drv_flags, + (ifp->if_drv_flags & IFF_DRV_RUNNING), + mask, ifp->if_capenable, 0); + qla_init_locked(ha); QLA_UNLOCK(ha, __func__); } VLAN_CAPABILITIES(ifp); break; } default: QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n", __func__, cmd)); ret = ether_ioctl(ifp, cmd, data); break; } return (ret); } static int qla_media_change(struct ifnet *ifp) { qla_host_t *ha; struct ifmedia *ifm; int ret = 0; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); ifm = &ha->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) ret = EINVAL; QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; ql_update_link_state(ha); if (ha->hw.link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha)); } QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\ (ha->hw.link_up ? 
"link_up" : "link_down"))); return; } static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, uint32_t iscsi_pdu) { bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; bus_dmamap_t map; int nsegs; int ret = -1; uint32_t tx_idx; struct mbuf *m_head = *m_headp; QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); if (m_head->m_flags & M_FLOWID) { #ifdef QL_ENABLE_ISCSI_TLV if (qla_iscsi_pdu(ha, m_head) == 0) { iscsi_pdu = 1; txr_idx = m_head->m_pkthdr.flowid & ((ha->hw.num_tx_rings >> 1) - 1); } else { txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1); } #else txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1); #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ } tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; if (NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) { QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\ "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\ ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head)); if (m_head) m_freem(m_head); *m_headp = NULL; return (ret); } map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (ret == EFBIG) { struct mbuf *m; QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, m_head->m_pkthdr.len)); m = m_defrag(m_head, M_NOWAIT); if (m == NULL) { ha->err_tx_defrag++; m_freem(m_head); *m_headp = NULL; device_printf(ha->pci_dev, "%s: m_defrag() = NULL [%d]\n", __func__, ret); return (ENOBUFS); } m_head = m; *m_headp = m_head; if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT))) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } } else if (ret) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); if (ret != ENOMEM) { m_freem(m_head); 
*m_headp = NULL; } return (ret); } QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet")); bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx, iscsi_pdu))) { ha->tx_ring[txr_idx].count++; if (iscsi_pdu) ha->tx_ring[txr_idx].iscsi_pkt_count++; ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; } else { bus_dmamap_unload(ha->tx_tag, map); if (ret == EINVAL) { if (m_head) m_freem(m_head); *m_headp = NULL; } } QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) { snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if (fp->tx_br == NULL) { QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " " fp[%d, %d]\n", ha->pci_func, fp->txr_idx)); return (-ENOMEM); } return 0; } static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) { struct mbuf *mp; struct ifnet *ifp = ha->ifp; if (mtx_initialized(&fp->tx_mtx)) { if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { m_freem(mp); } mtx_unlock(&fp->tx_mtx); buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } mtx_destroy(&fp->tx_mtx); } return; } static void qla_fp_taskqueue(void *context, int pending) { qla_tx_fp_t *fp; qla_host_t *ha; struct ifnet *ifp; struct mbuf *mp = NULL; int ret = 0; uint32_t txr_idx; uint32_t iscsi_pdu = 0; uint32_t rx_pkts_left = -1; fp = context; if (fp == NULL) return; ha = (qla_host_t *)fp->ha; ifp = ha->ifp; txr_idx = fp->txr_idx; mtx_lock(&fp->tx_mtx); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) { mtx_unlock(&fp->tx_mtx); goto qla_fp_taskqueue_exit; } while (rx_pkts_left && !ha->stop_rcv && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64); #ifdef 
QL_ENABLE_ISCSI_TLV ql_hw_tx_done_locked(ha, fp->txr_idx); ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1))); #else ql_hw_tx_done_locked(ha, fp->txr_idx); #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ mp = drbr_peek(ifp, fp->tx_br); while (mp != NULL) { if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) { #ifdef QL_ENABLE_ISCSI_TLV if (ql_iscsi_pdu(ha, mp) == 0) { txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1); iscsi_pdu = 1; } else { iscsi_pdu = 0; txr_idx = fp->txr_idx; } #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ } ret = qla_send(ha, &mp, txr_idx, iscsi_pdu); if (ret) { if (mp != NULL) drbr_putback(ifp, fp->tx_br, mp); else { drbr_advance(ifp, fp->tx_br); } mtx_unlock(&fp->tx_mtx); goto qla_fp_taskqueue_exit0; } else { drbr_advance(ifp, fp->tx_br); } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, mp); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; mp = drbr_peek(ifp, fp->tx_br); } } mtx_unlock(&fp->tx_mtx); qla_fp_taskqueue_exit0: if (rx_pkts_left || ((mp != NULL) && ret)) { taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); } else { if (!ha->stop_rcv) { QL_ENABLE_INTERRUPTS(ha, fp->txr_idx); } } qla_fp_taskqueue_exit: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); return; } static int qla_create_fp_taskqueues(qla_host_t *ha) { int i; uint8_t tq_name[32]; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; bzero(tq_name, sizeof (tq_name)); snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp); fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, taskqueue_thread_enqueue, &fp->fp_taskqueue); if (fp->fp_taskqueue == NULL) return (-1); taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", tq_name); QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, fp->fp_taskqueue)); } return (0); } static void qla_destroy_fp_taskqueues(qla_host_t *ha) { int i; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; 
if (fp->fp_taskqueue != NULL) { - taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); + taskqueue_drain_all(fp->fp_taskqueue); taskqueue_free(fp->fp_taskqueue); fp->fp_taskqueue = NULL; } } return; } static void qla_drain_fp_taskqueues(qla_host_t *ha) { int i; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; if (fp->fp_taskqueue != NULL) { - taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); + taskqueue_drain_all(fp->fp_taskqueue); } } return; } static int qla_transmit(struct ifnet *ifp, struct mbuf *mp) { qla_host_t *ha = (qla_host_t *)ifp->if_softc; qla_tx_fp_t *fp; int rss_id = 0; int ret = 0; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); #if __FreeBSD_version >= 1100000 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) #else if (mp->m_flags & M_FLOWID) #endif rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) % ha->hw.num_sds_rings; fp = &ha->tx_fp[rss_id]; if (fp->tx_br == NULL) { ret = EINVAL; goto qla_transmit_exit; } if (mp != NULL) { ret = drbr_enqueue(ifp, fp->tx_br, mp); } if (fp->fp_taskqueue != NULL) taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); ret = 0; qla_transmit_exit: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); return ret; } static void qla_qflush(struct ifnet *ifp) { int i; qla_tx_fp_t *fp; struct mbuf *mp; qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); for (i = 0; i < ha->hw.num_sds_rings; i++) { fp = &ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br) { mtx_lock(&fp->tx_mtx); while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { m_freem(mp); } mtx_unlock(&fp->tx_mtx); } } QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); return; } static void qla_stop(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; device_t dev; int i = 0; + ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0); + dev = ha->pci_dev; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ha->qla_watchdog_pause = 1; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp; fp = 
&ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); mtx_unlock(&fp->tx_mtx); } } while (!ha->qla_watchdog_paused) qla_mdelay(__func__, 1); ha->qla_interface_up = 0; qla_drain_fp_taskqueues(ha); ql_del_hw_if(ha); qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); return; } /* * Buffer Management Functions for Transmit and Receive Rings */ static int qla_alloc_xmt_bufs(qla_host_t *ha) { int ret = 0; uint32_t i, j; qla_tx_buf_t *txb; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ QLA_MAX_SEGMENTS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->tx_tag)) { device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", __func__); return (ENOMEM); } for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero((void *)ha->tx_ring[i].tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); } for (j = 0; j < ha->hw.num_tx_rings; j++) { for (i = 0; i < NUM_TX_DESCRIPTORS; i++) { txb = &ha->tx_ring[j].tx_buf[i]; if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &txb->map))) { ha->err_tx_dmamap_create++; device_printf(ha->pci_dev, "%s: bus_dmamap_create failed[%d]\n", __func__, ret); qla_free_xmt_bufs(ha); return (ret); } } } return 0; } /* * Release mbuf after it sent on the wire */ static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) { QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); if (txb->m_head) { bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; bus_dmamap_destroy(ha->tx_tag, txb->map); txb->map = NULL; } if (txb->map) { bus_dmamap_unload(ha->tx_tag, txb->map); bus_dmamap_destroy(ha->tx_tag, txb->map); txb->map = NULL; } QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); } static void 
qla_free_xmt_bufs(qla_host_t *ha) { int i, j; for (j = 0; j < ha->hw.num_tx_rings; j++) { for (i = 0; i < NUM_TX_DESCRIPTORS; i++) qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); } if (ha->tx_tag != NULL) { bus_dma_tag_destroy(ha->tx_tag); ha->tx_tag = NULL; } for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero((void *)ha->tx_ring[i].tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); } return; } static int qla_alloc_rcv_std(qla_host_t *ha) { int i, j, k, r, ret = 0; qla_rx_buf_t *rxb; qla_rx_ring_t *rx_ring; for (r = 0; r < ha->hw.num_rds_rings; r++) { rx_ring = &ha->rx_ring[r]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &rx_ring->rx_buf[i]; ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); if (ret) { device_printf(ha->pci_dev, "%s: dmamap[%d, %d] failed\n", __func__, r, i); for (k = 0; k < r; k++) { for (j = 0; j < NUM_RX_DESCRIPTORS; j++) { rxb = &ha->rx_ring[k].rx_buf[j]; bus_dmamap_destroy(ha->rx_tag, rxb->map); } } for (j = 0; j < i; j++) { bus_dmamap_destroy(ha->rx_tag, rx_ring->rx_buf[j].map); } goto qla_alloc_rcv_std_err; } } } qla_init_hw_rcv_descriptors(ha); for (r = 0; r < ha->hw.num_rds_rings; r++) { rx_ring = &ha->rx_ring[r]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &rx_ring->rx_buf[i]; rxb->handle = i; if (!(ret = ql_get_mbuf(ha, rxb, NULL))) { /* * set the physical address in the * corresponding descriptor entry in the * receive ring/queue for the hba */ qla_set_hw_rcv_desc(ha, r, i, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); } else { device_printf(ha->pci_dev, "%s: ql_get_mbuf [%d, %d] failed\n", __func__, r, i); bus_dmamap_destroy(ha->rx_tag, rxb->map); goto qla_alloc_rcv_std_err; } } } return 0; qla_alloc_rcv_std_err: return (-1); } static void qla_free_rcv_std(qla_host_t *ha) { int i, r; qla_rx_buf_t *rxb; for (r = 0; r < ha->hw.num_rds_rings; r++) { for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &ha->rx_ring[r].rx_buf[i]; if (rxb->m_head != NULL) { bus_dmamap_unload(ha->rx_tag, rxb->map); 
bus_dmamap_destroy(ha->rx_tag, rxb->map); m_freem(rxb->m_head); rxb->m_head = NULL; } } } return; } static int qla_alloc_rcv_bufs(qla_host_t *ha) { int i, ret = 0; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->rx_tag)) { device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", __func__); return (ENOMEM); } bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->hw.sds[i].sdsr_next = 0; ha->hw.sds[i].rxb_free = NULL; ha->hw.sds[i].rx_free = 0; } ret = qla_alloc_rcv_std(ha); return (ret); } static void qla_free_rcv_bufs(qla_host_t *ha) { int i; qla_free_rcv_std(ha); if (ha->rx_tag != NULL) { bus_dma_tag_destroy(ha->rx_tag); ha->rx_tag = NULL; } bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->hw.sds[i].sdsr_next = 0; ha->hw.sds[i].rxb_free = NULL; ha->hw.sds[i].rx_free = 0; } return; } int ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) { register struct mbuf *mp = nmp; struct ifnet *ifp; int ret = 0; uint32_t offset; bus_dma_segment_t segs[1]; int nsegs, mbuf_size; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); ifp = ha->ifp; if (ha->hw.enable_9kb) mbuf_size = MJUM9BYTES; else mbuf_size = MCLBYTES; if (mp == NULL) { if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE)) return(-1); if (ha->hw.enable_9kb) mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size); else mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mp == NULL) { ha->err_m_getcl++; ret = ENOBUFS; device_printf(ha->pci_dev, "%s: m_getcl failed\n", __func__); goto exit_ql_get_mbuf; } mp->m_len = mp->m_pkthdr.len = mbuf_size; } else { mp->m_len = mp->m_pkthdr.len = mbuf_size; 
mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); if (offset) { offset = 8 - offset; m_adj(mp, offset); } /* * Using memory from the mbuf cluster pool, invoke the bus_dma * machinery to arrange the memory mapping. */ ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, mp, segs, &nsegs, BUS_DMA_NOWAIT); rxb->paddr = segs[0].ds_addr; if (ret || !rxb->paddr || (nsegs != 1)) { m_free(mp); rxb->m_head = NULL; device_printf(ha->pci_dev, "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", __func__, ret, (long long unsigned int)rxb->paddr, nsegs); ret = -1; goto exit_ql_get_mbuf; } rxb->m_head = mp; bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); exit_ql_get_mbuf: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); return (ret); } static void qla_get_peer(qla_host_t *ha) { device_t *peers; int count, i, slot; int my_slot = pci_get_slot(ha->pci_dev); if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count)) return; for (i = 0; i < count; i++) { slot = pci_get_slot(peers[i]); if ((slot >= 0) && (slot == my_slot) && (pci_get_device(peers[i]) == pci_get_device(ha->pci_dev))) { if (ha->pci_dev != peers[i]) ha->peer_dev = peers[i]; } } } static void qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer) { qla_host_t *ha_peer; if (ha->peer_dev) { if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) { ha_peer->msg_from_peer = msg_to_peer; } } } +void +qla_set_error_recovery(qla_host_t *ha) +{ + struct ifnet *ifp = ha->ifp; + + if (!cold && ha->enable_error_recovery) { + if (ifp) + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + ha->qla_initiate_recovery = 1; + } else + ha->offline = 1; + return; +} + static void qla_error_recovery(void *context, int pending) { qla_host_t *ha = context; - uint32_t msecs_100 = 100; + uint32_t msecs_100 = 400; struct ifnet *ifp = ha->ifp; int i = 0; -device_printf(ha->pci_dev, "%s: \n", __func__); + device_printf(ha->pci_dev, "%s: enter\n", 
__func__); ha->hw.imd_compl = 1; + taskqueue_drain_all(ha->stats_tq); + taskqueue_drain_all(ha->async_event_tq); + if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; -device_printf(ha->pci_dev, "%s: enter\n", __func__); + device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n", + __func__, qla_get_usec_timestamp()); if (ha->qla_interface_up) { qla_mdelay(__func__, 300); - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + //ifp->if_drv_flags &= ~IFF_DRV_RUNNING; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp; fp = &ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); mtx_unlock(&fp->tx_mtx); } } } - qla_drain_fp_taskqueues(ha); if ((ha->pci_func & 0x1) == 0) { if (!ha->msg_from_peer) { qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) qla_mdelay(__func__, 100); } ha->msg_from_peer = 0; if (ha->enable_minidump) ql_minidump(ha); - (void) ql_init_hw(ha); + if (ha->enable_driverstate_dump) + ql_capture_drvr_state(ha); + if (ql_init_hw(ha)) { + device_printf(ha->pci_dev, + "%s: ts_usecs = %ld exit: ql_init_hw failed\n", + __func__, qla_get_usec_timestamp()); + ha->offline = 1; + goto qla_error_recovery_exit; + } + if (ha->qla_interface_up) { qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); } - qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); + if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY)) + qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); } else { if (ha->msg_from_peer == QL_PEER_MSG_RESET) { ha->msg_from_peer = 0; - qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); + if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY)) + qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); } else { qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); } while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) qla_mdelay(__func__, 100); ha->msg_from_peer = 0; - (void) ql_init_hw(ha); + if (ha->enable_driverstate_dump) + ql_capture_drvr_state(ha); - qla_mdelay(__func__, 1000); + if (msecs_100 == 0) { + 
device_printf(ha->pci_dev, + "%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n", + __func__, qla_get_usec_timestamp()); + ha->offline = 1; + goto qla_error_recovery_exit; + } + if (ql_init_hw(ha)) { + device_printf(ha->pci_dev, + "%s: ts_usecs = %ld exit: ql_init_hw failed\n", + __func__, qla_get_usec_timestamp()); + ha->offline = 1; + goto qla_error_recovery_exit; + } + if (ha->qla_interface_up) { qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); } } + qla_mdelay(__func__, ha->ms_delay_after_init); + + *((uint32_t *)&ha->hw.flags) = 0; + ha->qla_initiate_recovery = 0; + if (ha->qla_interface_up) { if (qla_alloc_xmt_bufs(ha) != 0) { + ha->offline = 1; goto qla_error_recovery_exit; } + qla_confirm_9kb_enable(ha); if (qla_alloc_rcv_bufs(ha) != 0) { + ha->offline = 1; goto qla_error_recovery_exit; } ha->stop_rcv = 0; if (ql_init_hw_if(ha) == 0) { ifp = ha->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ha->qla_watchdog_pause = 0; + ql_update_link_state(ha); + } else { + ha->offline = 1; + + if (ha->hw.sp_log_stop_events & + Q8_SP_LOG_STOP_IF_START_FAILURE) + ha->hw.sp_log_stop = -1; } - } else + } else { ha->qla_watchdog_pause = 0; + } qla_error_recovery_exit: -device_printf(ha->pci_dev, "%s: exit\n", __func__); + if (ha->offline ) { + device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n", + __func__, qla_get_usec_timestamp()); + if (ha->hw.sp_log_stop_events & + Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE) + ha->hw.sp_log_stop = -1; + } + QLA_UNLOCK(ha, __func__); - callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, - qla_watchdog, ha); + if (!ha->offline) + callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, + qla_watchdog, ha); + + device_printf(ha->pci_dev, + "%s: ts_usecs = %ld exit\n", + __func__, qla_get_usec_timestamp()); return; } static void qla_async_event(void *context, int pending) { qla_host_t *ha = context; if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; if (ha->async_event) { ha->async_event = 0; qla_hw_async_event(ha); } QLA_UNLOCK(ha, 
__func__); return; } static void qla_stats(void *context, int pending) { qla_host_t *ha; ha = context; ql_get_stats(ha); + return; } Index: stable/9/sys/dev/qlxgbe/ql_os.h =================================================================== --- stable/9/sys/dev/qlxgbe/ql_os.h (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_os.h (revision 330557) @@ -1,167 +1,175 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: ql_os.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
*/ #ifndef _QL_OS_H_ #define _QL_OS_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 900044 #error FreeBSD Version not supported - use version >= 900044 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define QLA_USEC_DELAY(usec) DELAY(usec) static __inline int qla_ms_to_hz(int ms) { int qla_hz; struct timeval t; t.tv_sec = ms / 1000; t.tv_usec = (ms % 1000) * 1000; qla_hz = tvtohz(&t); if (qla_hz < 0) qla_hz = 0x7fffffff; if (!qla_hz) qla_hz = 1; return (qla_hz); } static __inline int qla_sec_to_hz(int sec) { struct timeval t; t.tv_sec = sec; t.tv_usec = 0; return (tvtohz(&t)); } +static __inline uint64_t qla_get_usec_timestamp(void) +{ + struct timeval tv; + + microuptime(&tv); + + return ((uint64_t)(((uint64_t)tv.tv_sec) * 1000000 + tv.tv_usec)); +} #define qla_host_to_le16(x) htole16(x) #define qla_host_to_le32(x) htole32(x) #define qla_host_to_le64(x) htole64(x) #define qla_host_to_be16(x) htobe16(x) #define qla_host_to_be32(x) htobe32(x) #define qla_host_to_be64(x) htobe64(x) #define qla_le16_to_host(x) le16toh(x) #define qla_le32_to_host(x) le32toh(x) #define qla_le64_to_host(x) le64toh(x) #define qla_be16_to_host(x) be16toh(x) #define qla_be32_to_host(x) be32toh(x) #define qla_be64_to_host(x) be64toh(x) MALLOC_DECLARE(M_QLA83XXBUF); #define qla_mdelay(fn, msecs) \ {\ if (cold) \ DELAY((msecs * 1000)); \ else \ pause(fn, qla_ms_to_hz(msecs)); \ } /* * Locks */ #define QLA_LOCK(ha, str, to_ms, no_sleep) qla_lock(ha, str, to_ms, no_sleep) #define QLA_UNLOCK(ha, str) qla_unlock(ha, str) /* * structure encapsulating a DMA buffer */ struct qla_dma { bus_size_t alignment; uint32_t size; void *dma_b; bus_addr_t dma_addr; 
bus_dmamap_t dma_map; bus_dma_tag_t dma_tag; }; typedef struct qla_dma qla_dma_t; #endif /* #ifndef _QL_OS_H_ */ Index: stable/9/sys/dev/qlxgbe/ql_ver.h =================================================================== --- stable/9/sys/dev/qlxgbe/ql_ver.h (revision 330556) +++ stable/9/sys/dev/qlxgbe/ql_ver.h (revision 330557) @@ -1,41 +1,41 @@ /* * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: ql_ver.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
*/ #ifndef _QL_VER_H_ #define _QL_VER_H_ #define QLA_VERSION_MAJOR 3 #define QLA_VERSION_MINOR 10 -#define QLA_VERSION_BUILD 35 +#define QLA_VERSION_BUILD 36 #endif /* #ifndef _QL_VER_H_ */ Index: stable/9/sys =================================================================== --- stable/9/sys (revision 330556) +++ stable/9/sys (revision 330557) Property changes on: stable/9/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r329855 Index: stable/9 =================================================================== --- stable/9 (revision 330556) +++ stable/9 (revision 330557) Property changes on: stable/9 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r329855