Index: head/sys/dev/qlxgbe/ql_def.h
===================================================================
--- head/sys/dev/qlxgbe/ql_def.h	(revision 321232)
+++ head/sys/dev/qlxgbe/ql_def.h	(revision 321233)
@@ -1,272 +1,272 @@
 /*
  * Copyright (c) 2013-2016 Qlogic Corporation
  * All rights reserved.
  *
  *  Redistribution and use in source and binary forms, with or without
  *  modification, are permitted provided that the following conditions
  *  are met:
  *
  *  1. Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
  *  2. Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
  *
  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  *  POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * File: ql_def.h
  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
  */
 
 #ifndef _QL_DEF_H_
 #define _QL_DEF_H_
 
 #define BIT_0                   (0x1 << 0)
 #define BIT_1                   (0x1 << 1)
 #define BIT_2                   (0x1 << 2)
 #define BIT_3                   (0x1 << 3)
 #define BIT_4                   (0x1 << 4)
 #define BIT_5                   (0x1 << 5)
 #define BIT_6                   (0x1 << 6)
 #define BIT_7                   (0x1 << 7)
 #define BIT_8                   (0x1 << 8)
 #define BIT_9                   (0x1 << 9)
 #define BIT_10                  (0x1 << 10)
 #define BIT_11                  (0x1 << 11)
 #define BIT_12                  (0x1 << 12)
 #define BIT_13                  (0x1 << 13)
 #define BIT_14                  (0x1 << 14)
 #define BIT_15                  (0x1 << 15)
 #define BIT_16                  (0x1 << 16)
 #define BIT_17                  (0x1 << 17)
 #define BIT_18                  (0x1 << 18)
 #define BIT_19                  (0x1 << 19)
 #define BIT_20                  (0x1 << 20)
 #define BIT_21                  (0x1 << 21)
 #define BIT_22                  (0x1 << 22)
 #define BIT_23                  (0x1 << 23)
 #define BIT_24                  (0x1 << 24)
 #define BIT_25                  (0x1 << 25)
 #define BIT_26                  (0x1 << 26)
 #define BIT_27                  (0x1 << 27)
 #define BIT_28                  (0x1 << 28)
 #define BIT_29                  (0x1 << 29)
 #define BIT_30                  (0x1 << 30)
 #define BIT_31                  (0x1 << 31)
 
 struct qla_rx_buf {
 	struct mbuf	*m_head;
 	bus_dmamap_t	map;
 	bus_addr_t      paddr;
 	uint32_t	handle;
 	void		*next;
 };
 typedef struct qla_rx_buf qla_rx_buf_t;
 
 struct qla_rx_ring {
 	qla_rx_buf_t	rx_buf[NUM_RX_DESCRIPTORS];
 };
 typedef struct qla_rx_ring qla_rx_ring_t;
 
 struct qla_tx_buf {
 	struct mbuf	*m_head;
 	bus_dmamap_t	map;
 };
 typedef struct qla_tx_buf qla_tx_buf_t;
 
 #define QLA_MAX_SEGMENTS	62	/* maximum # of segs in a sg list */
 #define QLA_MAX_MTU		9000
 #define QLA_STD_FRAME_SIZE	1514
 #define QLA_MAX_TSO_FRAME_SIZE	((64 * 1024 - 1) + 22)
 
 /* Number of MSIX/MSI Vectors required */
 
 struct qla_ivec {
 	uint32_t		sds_idx;
 	void			*ha;
 	struct resource		*irq;
 	void			*handle;
 	int			irq_rid;
 };
 
 typedef struct qla_ivec qla_ivec_t;
 
-#define QLA_WATCHDOG_CALLOUT_TICKS	1
+#define QLA_WATCHDOG_CALLOUT_TICKS	2
 
 typedef struct _qla_tx_ring {
 	qla_tx_buf_t	tx_buf[NUM_TX_DESCRIPTORS];
 	uint64_t	count;
 } qla_tx_ring_t;
 
 typedef struct _qla_tx_fp {
 	struct mtx		tx_mtx;
 	char			tx_mtx_name[32];
 	struct buf_ring		*tx_br;
 	struct task		fp_task;
 	struct taskqueue	*fp_taskqueue;
 	void			*ha;
 	uint32_t		txr_idx;
 } qla_tx_fp_t;
 
 /*
  * Adapter structure contains the hardware independent information of the
  * pci function.
  */
 struct qla_host {
         volatile struct {
                 volatile uint32_t
 			qla_interface_up        :1,
 			qla_callout_init	:1,
 			qla_watchdog_active	:1,
 			qla_watchdog_exit	:1,
 			qla_watchdog_pause	:1,
 			stop_rcv		:1,
 			parent_tag		:1,
 			lock_init		:1;
         } flags;
 
 	volatile uint32_t	qla_watchdog_exited;
 	volatile uint32_t	qla_watchdog_paused;
 	volatile uint32_t	qla_initiate_recovery;
 
 	device_t		pci_dev;
 
 	uint16_t		watchdog_ticks;
 	uint8_t			pci_func;
 	uint8_t			resvd;
 
         /* ioctl related */
         struct cdev             *ioctl_dev;
 
 	/* register mapping */
 	struct resource		*pci_reg;
 	int			reg_rid;
 	struct resource		*pci_reg1;
 	int			reg_rid1;
 
 	/* interrupts */
 	struct resource         *mbx_irq;
 	void			*mbx_handle;
 	int			mbx_irq_rid;
 
 	int			msix_count;
 
 	qla_ivec_t		irq_vec[MAX_SDS_RINGS];
 	
 	/* parent dma tag */
 	bus_dma_tag_t           parent_tag;
 
 	/* interface to o.s */
 	struct ifnet		*ifp;
 
 	struct ifmedia		media;
 	uint16_t		max_frame_size;
 	uint16_t		rsrvd0;
 	int			if_flags;
 
 	/* hardware access lock */
 
 	struct mtx		hw_lock;
 	volatile uint32_t	hw_lock_held;
 
 	/* transmit and receive buffers */
 	uint32_t		txr_idx; /* index of the current tx ring */
 	qla_tx_ring_t		tx_ring[NUM_TX_RINGS];
 						
 	bus_dma_tag_t		tx_tag;
 	struct callout		tx_callout;
 
 	qla_tx_fp_t		tx_fp[MAX_SDS_RINGS];
 
 	qla_rx_ring_t		rx_ring[MAX_RDS_RINGS];
 	bus_dma_tag_t		rx_tag;
 	uint32_t		std_replenish;
 
 	qla_rx_buf_t		*rxb_free;
 	uint32_t		rxb_free_count;
 	volatile uint32_t	posting;
 
 	/* stats */
 	uint32_t		err_m_getcl;
 	uint32_t		err_m_getjcl;
 	uint32_t		err_tx_dmamap_create;
 	uint32_t		err_tx_dmamap_load;
 	uint32_t		err_tx_defrag;
 
 	uint64_t		rx_frames;
 	uint64_t		rx_bytes;
 
 	uint64_t		lro_pkt_count;
 	uint64_t		lro_bytes;
 
 	uint64_t		ipv4_lro;
 	uint64_t		ipv6_lro;
 
 	uint64_t		tx_frames;
 	uint64_t		tx_bytes;
 	uint64_t		tx_tso_frames;
 	uint64_t		hw_vlan_tx_frames;
 
         uint32_t                fw_ver_major;
         uint32_t                fw_ver_minor;
         uint32_t                fw_ver_sub;
         uint32_t                fw_ver_build;
 
 	/* hardware specific */
 	qla_hw_t		hw;
 
 	/* debug stuff */
 	volatile const char 	*qla_lock;
 	volatile const char	*qla_unlock;
 	uint32_t		dbg_level;
 
 	uint8_t			fw_ver_str[32];
 
 	/* Error Injection Related */
 	uint32_t		err_inject;
 	struct task		err_task;
 	struct taskqueue	*err_tq;
 
 	/* Async Event Related */
 	uint32_t                async_event;
 	struct task             async_event_task;
 	struct taskqueue        *async_event_tq;
 
 	/* Peer Device */
 	device_t		peer_dev;
 
 	volatile uint32_t	msg_from_peer;
 #define QL_PEER_MSG_RESET	0x01
 #define QL_PEER_MSG_ACK		0x02
 
 };
 typedef struct qla_host qla_host_t;
 
 /* note that align has to be a power of 2 */
 #define QL_ALIGN(size, align) (size + (align - 1)) & ~(align - 1);
 #define QL_MIN(x, y) ((x < y) ? x : y)
 
 #define QL_RUNNING(ifp) \
 		((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
 			IFF_DRV_RUNNING)
 
 /* Return 0, if identical, else 1 */
 #define QL_MAC_CMP(mac1, mac2)    \
 	((((*(uint32_t *) mac1) == (*(uint32_t *) mac2) && \
 	(*(uint16_t *)(mac1 + 4)) == (*(uint16_t *)(mac2 + 4)))) ? 0 : 1)
 
 #endif /* #ifndef _QL_DEF_H_ */
Index: head/sys/dev/qlxgbe/ql_hw.c
===================================================================
--- head/sys/dev/qlxgbe/ql_hw.c	(revision 321232)
+++ head/sys/dev/qlxgbe/ql_hw.c	(revision 321233)
@@ -1,5137 +1,5145 @@
 /*
  * Copyright (c) 2013-2016 Qlogic Corporation
  * All rights reserved.
  *
  *  Redistribution and use in source and binary forms, with or without
  *  modification, are permitted provided that the following conditions
  *  are met:
  *
  *  1. Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
  *  2. Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
  *
  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  *  POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * File: ql_hw.c
  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
  * Content: Contains Hardware dependent functions
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "ql_os.h"
 #include "ql_hw.h"
 #include "ql_def.h"
 #include "ql_inline.h"
 #include "ql_ver.h"
 #include "ql_glbl.h"
 #include "ql_dbg.h"
 #include "ql_minidump.h"
 
 /*
  * Static Functions
  */
 
 static void qla_del_rcv_cntxt(qla_host_t *ha);
 static int qla_init_rcv_cntxt(qla_host_t *ha);
 static void qla_del_xmt_cntxt(qla_host_t *ha);
 static int qla_init_xmt_cntxt(qla_host_t *ha);
 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
 	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
 	uint32_t num_intrs, uint32_t create);
 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
 	int tenable, int rcv);
 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
 
 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
 		uint8_t *hdr);
 static int qla_hw_add_all_mcast(qla_host_t *ha);
 static int qla_hw_del_all_mcast(qla_host_t *ha);
 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
 
 static int qla_init_nic_func(qla_host_t *ha);
 static int qla_stop_nic_func(qla_host_t *ha);
 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
 static void qla_get_quick_stats(qla_host_t *ha);
 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
 static int qla_get_cam_search_mode(qla_host_t *ha);
 
 static void ql_minidump_free(qla_host_t *ha);
 
 
 static int
 qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
 {
         int err = 0, ret;
         qla_host_t *ha;
 	uint32_t i;
 
         err = sysctl_handle_int(oidp, &ret, 0, req);
 
         if (err || !req->newptr)
                 return (err);
 
         if (ret == 1) {
 
                 ha = (qla_host_t *)arg1;
 
 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
 
 			device_printf(ha->pci_dev,
 				"%s: sds_ring[%d] = %p\n", __func__,i,
 				(void *)ha->hw.sds[i].intr_count);
 
 			device_printf(ha->pci_dev,
 				"%s: sds_ring[%d].spurious_intr_count = %p\n",
 				__func__,
 				i, (void *)ha->hw.sds[i].spurious_intr_count);
 
 			device_printf(ha->pci_dev,
 				"%s: sds_ring[%d].rx_free = %d\n", __func__,i,
 				ha->hw.sds[i].rx_free);
 		}
 
 		for (i = 0; i < ha->hw.num_tx_rings; i++) 
 			device_printf(ha->pci_dev,
 				"%s: tx[%d] = %p\n", __func__,i,
 				(void *)ha->tx_ring[i].count);
 
 		for (i = 0; i < ha->hw.num_rds_rings; i++)
 			device_printf(ha->pci_dev,
 				"%s: rds_ring[%d] = %p\n", __func__,i,
 				(void *)ha->hw.rds[i].count);
 
 		device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
 			(void *)ha->lro_pkt_count);
 
 		device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
 			(void *)ha->lro_bytes);
 
 #ifdef QL_ENABLE_ISCSI_TLV
 		device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
 			(void *)ha->hw.iscsi_pkt_count);
 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
 
 	}
 	return (err);
 }
 
 static int
 qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
 {
 	int err, ret = 0;
 	qla_host_t *ha;
 
 	err = sysctl_handle_int(oidp, &ret, 0, req);
 
 	if (err || !req->newptr)
 		return (err);
 
 	if (ret == 1) {
 		ha = (qla_host_t *)arg1;
 		qla_get_quick_stats(ha);
 	}
 	return (err);
 }
 
 #ifdef QL_DBG
 
 static void
 qla_stop_pegs(qla_host_t *ha)
 {
         uint32_t val = 1;
 
         ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
         ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
         ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
         ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
         ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
         device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
 }
 
 static int
 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
 {
 	int err, ret = 0;
 	qla_host_t *ha;
 	
 	err = sysctl_handle_int(oidp, &ret, 0, req);
 
 
 	if (err || !req->newptr)
 		return (err);
 
 	if (ret == 1) {
 		ha = (qla_host_t *)arg1;
 		QLA_LOCK(ha);
 		qla_stop_pegs(ha);	
 		QLA_UNLOCK(ha);
 	}
 
 	return err;
 }
 #endif /* #ifdef QL_DBG */
 
 static int
 qla_validate_set_port_cfg_bit(uint32_t bits)
 {
         if ((bits & 0xF) > 1)
                 return (-1);
 
         if (((bits >> 4) & 0xF) > 2)
                 return (-1);
 
         if (((bits >> 8) & 0xF) > 2)
                 return (-1);
 
         return (0);
 }
 
 static int
 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
 {
         int err, ret = 0;
         qla_host_t *ha;
         uint32_t cfg_bits;
 
         err = sysctl_handle_int(oidp, &ret, 0, req);
 
         if (err || !req->newptr)
                 return (err);
 
         if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {
 
                 ha = (qla_host_t *)arg1;
 
                 err = qla_get_port_config(ha, &cfg_bits);
 
                 if (err)
                         goto qla_sysctl_set_port_cfg_exit;
 
                 if (ret & 0x1) {
                         cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                 } else {
                         cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                 }
 
                 ret = ret >> 4;
                 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
 
                 if ((ret & 0xF) == 0) {
                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                 } else if ((ret & 0xF) == 1){
                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                 } else {
                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                 }
 
                 ret = ret >> 4;
                 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
 
                 if (ret == 0) {
                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                 } else if (ret == 1){
                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                 } else {
                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                 }
 
                 err = qla_set_port_config(ha, cfg_bits);
         } else {
                 ha = (qla_host_t *)arg1;
 
                 err = qla_get_port_config(ha, &cfg_bits);
         }
 
 qla_sysctl_set_port_cfg_exit:
         return err;
 }
 
 static int
 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
 {
 	int err, ret = 0;
 	qla_host_t *ha;
 
 	err = sysctl_handle_int(oidp, &ret, 0, req);
 
 	if (err || !req->newptr)
 		return (err);
 
 	ha = (qla_host_t *)arg1;
 
 	if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
 		(ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
 		err = qla_set_cam_search_mode(ha, (uint32_t)ret);
 	} else {
 		device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
 	}
 
 	return (err);
 }
 
 static int
 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
 {
 	int err, ret = 0;
 	qla_host_t *ha;
 
 	err = sysctl_handle_int(oidp, &ret, 0, req);
 
 	if (err || !req->newptr)
 		return (err);
 
 	ha = (qla_host_t *)arg1;
 	err = qla_get_cam_search_mode(ha);
 
 	return (err);
 }
 
 
 /*
  * Name: ql_hw_add_sysctls
  * Function: Add P3Plus specific sysctls
  */
 void
 ql_hw_add_sysctls(qla_host_t *ha)
 {
         device_t	dev;
 
         dev = ha->pci_dev;
 
 	ha->hw.num_sds_rings = MAX_SDS_RINGS;
 	ha->hw.num_rds_rings = MAX_RDS_RINGS;
 	ha->hw.num_tx_rings = NUM_TX_RINGS;
 
 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
 		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
 
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
 		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
 
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
 		ha->hw.num_tx_rings, "Number of Transmit Rings");
 
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
 		ha->txr_idx, "Tx Ring Used");
 
 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 		OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
 		(void *)ha, 0,
 		qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
 
         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
                 (void *)ha, 0,
                 qla_sysctl_get_quick_stats, "I", "Quick Statistics");
 
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
 		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
 
 	ha->hw.sds_cidx_thres = 32;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
 		ha->hw.sds_cidx_thres,
 		"Number of SDS entries to process before updating"
 		" SDS Ring Consumer Index");
 
 	ha->hw.rds_pidx_thres = 32;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
 		ha->hw.rds_pidx_thres,
 		"Number of Rcv Rings Entries to post before updating"
 		" RDS Ring Producer Index");
 
         ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                 &ha->hw.rcv_intr_coalesce,
                 ha->hw.rcv_intr_coalesce,
                 "Rcv Intr Coalescing Parameters\n"
                 "\tbits 15:0 max packets\n"
                 "\tbits 31:16 max micro-seconds to wait\n"
                 "\tplease run\n"
                 "\tifconfig <if> down && ifconfig <if> up\n"
                 "\tto take effect \n");
 
         ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                 &ha->hw.xmt_intr_coalesce,
                 ha->hw.xmt_intr_coalesce,
                 "Xmt Intr Coalescing Parameters\n"
                 "\tbits 15:0 max packets\n"
                 "\tbits 31:16 max micro-seconds to wait\n"
                 "\tplease run\n"
                 "\tifconfig <if> down && ifconfig <if> up\n"
                 "\tto take effect \n");
 
         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                 (void *)ha, 0,
                 qla_sysctl_port_cfg, "I",
                         "Set Port Configuration if values below "
                         "otherwise Get Port Configuration\n"
                         "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n"
                         "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n"
                         "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                         " 1 = xmt only; 2 = rcv only;\n"
                 );
 
 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 		OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
 		(void *)ha, 0,
 		qla_sysctl_set_cam_search_mode, "I",
 			"Set CAM Search Mode"
 			"\t 1 = search mode internal\n"
 			"\t 2 = search mode auto\n");
 
 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 		OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
 		(void *)ha, 0,
 		qla_sysctl_get_cam_search_mode, "I",
 			"Get CAM Search Mode"
 			"\t 1 = search mode internal\n"
 			"\t 2 = search mode auto\n");
 
         ha->hw.enable_9kb = 1;
 
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
 
         ha->hw.enable_hw_lro = 1;
 
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n"
 		"\t 1 : Hardware LRO if LRO is enabled\n"
 		"\t 0 : Software LRO if LRO is enabled\n"
 		"\t Any change requires ifconfig down/up to take effect\n"
 		"\t Note that LRO may be turned off/on via ifconfig\n");
 
 	ha->hw.mdump_active = 0;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
 		ha->hw.mdump_active,
 		"Minidump retrieval is Active");
 
 	ha->hw.mdump_done = 0;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "mdump_done", CTLFLAG_RW,
 		&ha->hw.mdump_done, ha->hw.mdump_done,
 		"Minidump has been done and available for retrieval");
 
 	ha->hw.mdump_capture_mask = 0xF;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
 		&ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
 		"Minidump capture mask");
 #ifdef QL_DBG
 
 	ha->err_inject = 0;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "err_inject",
                 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                 "Error to be injected\n"
                 "\t\t\t 0: No Errors\n"
                 "\t\t\t 1: rcv: rxb struct invalid\n"
                 "\t\t\t 2: rcv: mp == NULL\n"
                 "\t\t\t 3: lro: rxb struct invalid\n"
                 "\t\t\t 4: lro: mp == NULL\n"
                 "\t\t\t 5: rcv: num handles invalid\n"
                 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                 "\t\t\t 8: mbx: mailbox command failure\n"
                 "\t\t\t 9: heartbeat failure\n"
                 "\t\t\t A: temperature failure\n"
 		"\t\t\t 11: m_getcl or m_getjcl failure\n" );
 
 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                 (void *)ha, 0,
                 qla_sysctl_stop_pegs, "I", "Peg Stop");
 
 #endif /* #ifdef QL_DBG */
 
         ha->hw.user_pri_nic = 0;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                 ha->hw.user_pri_nic,
                 "VLAN Tag User Priority for Normal Ethernet Packets");
 
         ha->hw.user_pri_iscsi = 4;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                 ha->hw.user_pri_iscsi,
                 "VLAN Tag User Priority for iSCSI Packets");
 
 }
 
 void
 ql_hw_link_status(qla_host_t *ha)
 {
 	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
 
 	if (ha->hw.link_up) {
 		device_printf(ha->pci_dev, "link Up\n");
 	} else {
 		device_printf(ha->pci_dev, "link Down\n");
 	}
 
 	if (ha->hw.flags.fduplex) {
 		device_printf(ha->pci_dev, "Full Duplex\n");
 	} else {
 		device_printf(ha->pci_dev, "Half Duplex\n");
 	}
 
 	if (ha->hw.flags.autoneg) {
 		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
 	} else {
 		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
 	}
 
 	switch (ha->hw.link_speed) {
 	case 0x710:
 		device_printf(ha->pci_dev, "link speed\t\t 10Gps\n");
 		break;
 
 	case 0x3E8:
 		device_printf(ha->pci_dev, "link speed\t\t 1Gps\n");
 		break;
 
 	case 0x64:
 		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
 		break;
 
 	default:
 		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
 		break;
 	}
 
 	switch (ha->hw.module_type) {
 
 	case 0x01:
 		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
 		break;
 
 	case 0x02:
 		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
 		break;
 
 	case 0x03:
 		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
 		break;
 
 	case 0x04:
 		device_printf(ha->pci_dev,
 			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
 			ha->hw.cable_length);
 		break;
 
 	case 0x05:
 		device_printf(ha->pci_dev, "Module Type 10GE Active"
 			" Limiting Copper(Compliant)[%d m]\n",
 			ha->hw.cable_length);
 		break;
 
 	case 0x06:
 		device_printf(ha->pci_dev,
 			"Module Type 10GE Passive Copper"
 			" (Legacy, Best Effort)[%d m]\n",
 			ha->hw.cable_length);
 		break;
 
 	case 0x07:
 		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
 		break;
 
 	case 0x08:
 		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
 		break;
 
 	case 0x09:
 		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
 		break;
 
 	case 0x0A:
 		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
 		break;
 
 	case 0x0B:
 		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
 			"(Legacy, Best Effort)\n");
 		break;
 
 	default:
 		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
 			ha->hw.module_type);
 		break;
 	}
 
 	if (ha->hw.link_faults == 1)
 		device_printf(ha->pci_dev, "SFP Power Fault\n");
 }
 
 /*
  * Name: ql_free_dma
  * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
  */
 void
 ql_free_dma(qla_host_t *ha)
 {
 	uint32_t i;
 
         if (ha->hw.dma_buf.flags.sds_ring) {
 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
 			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
 		}
         	ha->hw.dma_buf.flags.sds_ring = 0;
 	}
 
         if (ha->hw.dma_buf.flags.rds_ring) {
 		for (i = 0; i < ha->hw.num_rds_rings; i++) {
 			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
 		}
         	ha->hw.dma_buf.flags.rds_ring = 0;
 	}
 
         if (ha->hw.dma_buf.flags.tx_ring) {
 		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
         	ha->hw.dma_buf.flags.tx_ring = 0;
 	}
 	ql_minidump_free(ha);
 }
 
 /*
  * Name: ql_alloc_dma
  * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
  */
 int
 ql_alloc_dma(qla_host_t *ha)
 {
         device_t                dev;
 	uint32_t		i, j, size, tx_ring_size;
 	qla_hw_t		*hw;
 	qla_hw_tx_cntxt_t	*tx_cntxt;
 	uint8_t			*vaddr;
 	bus_addr_t		paddr;
 
         dev = ha->pci_dev;
 
         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
 
 	hw = &ha->hw;
 	/*
 	 * Allocate Transmit Ring
 	 */
 	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
 	size = (tx_ring_size * ha->hw.num_tx_rings);
 
 	hw->dma_buf.tx_ring.alignment = 8;
 	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
 	
         if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                 goto ql_alloc_dma_exit;
         }
 
 	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
 	paddr = hw->dma_buf.tx_ring.dma_addr;
 	
 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
 		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
 
 		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
 		tx_cntxt->tx_ring_paddr = paddr;
 
 		vaddr += tx_ring_size;
 		paddr += tx_ring_size;
 	}
 
 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
 		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
 
 		tx_cntxt->tx_cons = (uint32_t *)vaddr;
 		tx_cntxt->tx_cons_paddr = paddr;
 
 		vaddr += sizeof (uint32_t);
 		paddr += sizeof (uint32_t);
 	}
 
         ha->hw.dma_buf.flags.tx_ring = 1;
 
 	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
 		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
 		hw->dma_buf.tx_ring.dma_b));
 	/*
 	 * Allocate Receive Descriptor Rings
 	 */
 
 	for (i = 0; i < hw->num_rds_rings; i++) {
 
 		hw->dma_buf.rds_ring[i].alignment = 8;
 		hw->dma_buf.rds_ring[i].size =
 			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
 
 		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
 			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
 				__func__, i);
 
 			for (j = 0; j < i; j++)
 				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
 
 			goto ql_alloc_dma_exit;
 		}
 		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
 			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
 			hw->dma_buf.rds_ring[i].dma_b));
 	}
 
 	hw->dma_buf.flags.rds_ring = 1;
 
 	/*
 	 * Allocate Status Descriptor Rings
 	 */
 
 	for (i = 0; i < hw->num_sds_rings; i++) {
 		hw->dma_buf.sds_ring[i].alignment = 8;
 		hw->dma_buf.sds_ring[i].size =
 			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
 
 		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
 			device_printf(dev, "%s: sds ring alloc failed\n",
 				__func__);
 
 			for (j = 0; j < i; j++)
 				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
 
 			goto ql_alloc_dma_exit;
 		}
 		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
 			__func__, i,
 			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
 			hw->dma_buf.sds_ring[i].dma_b));
 	}
 	for (i = 0; i < hw->num_sds_rings; i++) {
 		hw->sds[i].sds_ring_base =
 			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
 	}
 
 	hw->dma_buf.flags.sds_ring = 1;
 
 	return 0;
 
 ql_alloc_dma_exit:
 	ql_free_dma(ha);
 	return -1;
 }
 
 #define Q8_MBX_MSEC_DELAY	5000
 
/*
 * Name: qla_mbx_cmd
 * Function: Issues a command to the firmware through the host mailbox
 *	registers and reads back the firmware's response words.
 * Parameters:
 *	h_mbox/n_hmbox: command words written to Q8_HOST_MBOX0..N.
 *	fw_mbox/n_fwmbox: destination buffer/count for the response words
 *		read from Q8_FW_MBOX0..N.
 *	no_pause: when set, busy-wait with DELAY() instead of sleeping via
 *		qla_mdelay(), and use a shorter (1 second) initial timeout.
 * Returns: 0 on success; -1 (host mailbox busy timeout), -2 (firmware
 *	response timeout) or -3 (injected failure). All failure paths set
 *	ha->qla_initiate_recovery.
 */
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
	uint32_t i;
	uint32_t data;
	int ret = 0;

	/* Optional fault-injection hook for exercising the recovery path. */
	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
		ret = -3;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	if (no_pause)
		i = 1000;
	else
		i = Q8_MBX_MSEC_DELAY;

	/* Wait (~1ms per iteration) for the host mailbox to become idle. */
	while (i) {
		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
		if (data == 0)
			break;
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}

	if (i == 0) {
		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -1;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	/* Write the command words, then ring the host mailbox doorbell. */
	for (i = 0; i < n_hmbox; i++) {
		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
		h_mbox++;
	}

	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);


	/*
	 * Poll for the firmware completion. Responses whose first word has
	 * 0x8 in bits 15:12 are skipped -- presumably async event codes
	 * (0x8xxx) rather than this command's reply; confirm against the
	 * firmware interface spec.
	 */
	i = Q8_MBX_MSEC_DELAY;
	while (i) {
		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

		if ((data & 0x3) == 1) {
			data = READ_REG32(ha, Q8_FW_MBOX0);
			if ((data & 0xF000) != 0x8000)
				break;
		}
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}
	if (i == 0) {
		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -2;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_fwmbox; i++) {
		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
	}

	/*
	 * Clear the firmware mailbox control and the mailbox interrupt mask
	 * register (presumably acknowledging/re-arming the mailbox
	 * interrupt -- NOTE(review): verify against register documentation).
	 */
	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
	return (ret);
}
 
 int
 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
 	uint32_t *num_rcvq)
 {
 	uint32_t *mbox, err;
 	device_t dev = ha->pci_dev;
 
 	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
 
 	mbox = ha->hw.mbox;
 
 	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);	
 
 	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
 		device_printf(dev, "%s: failed0\n", __func__);
 		return (-1);
 	}
 	err = mbox[0] >> 25; 
 
 	if (supports_9kb != NULL) {
 		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
 			*supports_9kb = 1;
 		else
 			*supports_9kb = 0;
 	}
 
 	if (num_rcvq != NULL)
 		*num_rcvq =  ((mbox[6] >> 16) & 0xFFFF);
 
 	if ((err != 1) && (err != 0)) {
 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 		return (-1);
 	}
 	return 0;
 }
 
 static int
 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
 	uint32_t create)
 {
 	uint32_t i, err;
 	device_t dev = ha->pci_dev;
 	q80_config_intr_t *c_intr;
 	q80_config_intr_rsp_t *c_intr_rsp;
 
 	c_intr = (q80_config_intr_t *)ha->hw.mbox;
 	bzero(c_intr, (sizeof (q80_config_intr_t)));
 
 	c_intr->opcode = Q8_MBX_CONFIG_INTR;
 
 	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
 	c_intr->count_version |= Q8_MBX_CMD_VERSION;
 
 	c_intr->nentries = num_intrs;
 
 	for (i = 0; i < num_intrs; i++) {
 		if (create) {
 			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
 			c_intr->intr[i].msix_index = start_idx + 1 + i;
 		} else {
 			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
 			c_intr->intr[i].msix_index =
 				ha->hw.intr_id[(start_idx + i)];
 		}
 
 		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
 	}
 
 	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
 		(sizeof (q80_config_intr_t) >> 2),
 		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
 		device_printf(dev, "%s: failed0\n", __func__);
 		return (-1);
 	}
 
 	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
 
 	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
 			c_intr_rsp->nentries);
 
 		for (i = 0; i < c_intr_rsp->nentries; i++) {
 			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
 				__func__, i, 
 				c_intr_rsp->intr[i].status,
 				c_intr_rsp->intr[i].intr_id,
 				c_intr_rsp->intr[i].intr_src);
 		}
 
 		return (-1);
 	}
 
 	for (i = 0; ((i < num_intrs) && create); i++) {
 		if (!c_intr_rsp->intr[i].status) {
 			ha->hw.intr_id[(start_idx + i)] =
 				c_intr_rsp->intr[i].intr_id;
 			ha->hw.intr_src[(start_idx + i)] =
 				c_intr_rsp->intr[i].intr_src;
 		}
 	}
 
 	return (0);
 }
 
/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
/*
 * Fixed 40-byte RSS hash key, stored as five 64-bit words; copied verbatim
 * into the CONFIG_RSS mailbox command by qla_config_rss().
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
			0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };
 
 static int
 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
 {
 	q80_config_rss_t	*c_rss;
 	q80_config_rss_rsp_t	*c_rss_rsp;
 	uint32_t		err, i;
 	device_t		dev = ha->pci_dev;
 
 	c_rss = (q80_config_rss_t *)ha->hw.mbox;
 	bzero(c_rss, (sizeof (q80_config_rss_t)));
 
 	c_rss->opcode = Q8_MBX_CONFIG_RSS;
 
 	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
 	c_rss->count_version |= Q8_MBX_CMD_VERSION;
 
 	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
 				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
 	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
 	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
 
 	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
 	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
 
 	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
 
 	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
 	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
 
 	c_rss->cntxt_id = cntxt_id;
 
 	for (i = 0; i < 5; i++) {
 		c_rss->rss_key[i] = rss_key[i];
 	}
 
 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
 		(sizeof (q80_config_rss_t) >> 2),
 		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
 		device_printf(dev, "%s: failed0\n", __func__);
 		return (-1);
 	}
 	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
 
 	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 		return (-1);
 	}
 	return 0;
 }
 
 static int
 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
         uint16_t cntxt_id, uint8_t *ind_table)
 {
         q80_config_rss_ind_table_t      *c_rss_ind;
         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
         uint32_t                        err;
         device_t                        dev = ha->pci_dev;
 
 	if ((count > Q8_RSS_IND_TBL_SIZE) ||
 		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
 		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
 			start_idx, count);
 		return (-1);
 	}
 
         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
 
         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
 
 	c_rss_ind->start_idx = start_idx;
 	c_rss_ind->end_idx = start_idx + count - 1;
 	c_rss_ind->cntxt_id = cntxt_id;
 	bcopy(ind_table, c_rss_ind->ind_table, count);
 
 	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
 		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
 		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
 		device_printf(dev, "%s: failed0\n", __func__);
 		return (-1);
 	}
 
 	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
 	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 		return (-1);
 	}
 	return 0;
 }
 
 /*
  * Name: qla_config_intr_coalesce
  * Function: Configure Interrupt Coalescing.
  */
 static int
 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
 	int rcv)
 {
 	q80_config_intr_coalesc_t	*intrc;
 	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
 	uint32_t			err, i;
 	device_t			dev = ha->pci_dev;
 	
 	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
 	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
 
 	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
 	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
 	intrc->count_version |= Q8_MBX_CMD_VERSION;
 
 	if (rcv) {
 		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
 		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
 		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
 	} else {
 		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
 		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
 		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
 	}
 
 	intrc->cntxt_id = cntxt_id;
 
 	if (tenable) {
 		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
 		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
 
 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
 			intrc->sds_ring_mask |= (1 << i);
 		}
 		intrc->ms_timeout = 1000;
 	}
 
 	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
 		(sizeof (q80_config_intr_coalesc_t) >> 2),
 		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
 		device_printf(dev, "%s: failed0\n", __func__);
 		return (-1);
 	}
 	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
 
 	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 		return (-1);
 	}
 	
 	return 0;
 }
 
 
 /*
  * Name: qla_config_mac_addr
  * Function: binds a MAC address to the context/interface.
  *	Can be unicast, multicast or broadcast.
  */
 static int
 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
 	uint32_t num_mac)
 {
 	q80_config_mac_addr_t		*cmac;
 	q80_config_mac_addr_rsp_t	*cmac_rsp;
 	uint32_t			err;
 	device_t			dev = ha->pci_dev;
 	int				i;
 	uint8_t				*mac_cpy = mac_addr;
 
 	if (num_mac > Q8_MAX_MAC_ADDRS) {
 		device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
 			__func__, (add_mac ? "Add" : "Del"), num_mac);
 		return (-1);
 	}
 
 	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
 	bzero(cmac, (sizeof (q80_config_mac_addr_t)));
 
 	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
 	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
 	cmac->count_version |= Q8_MBX_CMD_VERSION;
 
 	if (add_mac) 
 		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
 	else
 		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
 		
 	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
 
 	cmac->nmac_entries = num_mac;
 	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
 
 	for (i = 0; i < num_mac; i++) {
 		bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
 		mac_addr = mac_addr + ETHER_ADDR_LEN;
 	}
 
 	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
 		(sizeof (q80_config_mac_addr_t) >> 2),
 		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
 		device_printf(dev, "%s: %s failed0\n", __func__,
 			(add_mac ? "Add" : "Del"));
 		return (-1);
 	}
 	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
 
 	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
 			(add_mac ? "Add" : "Del"), err);
 		for (i = 0; i < num_mac; i++) {
 			device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
 				__func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
 				mac_cpy[3], mac_cpy[4], mac_cpy[5]);
 			mac_cpy += ETHER_ADDR_LEN;
 		}
 		return (-1);
 	}
 	
 	return 0;
 }
 
 
 /*
  * Name: qla_set_mac_rcv_mode
  * Function: Enable/Disable AllMulticast and Promiscous Modes.
  */
 static int
 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
 {
 	q80_config_mac_rcv_mode_t	*rcv_mode;
 	uint32_t			err;
 	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
 	device_t			dev = ha->pci_dev;
 
 	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
 	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
 
 	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
 	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
 	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
 
 	rcv_mode->mode = mode;
 
 	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
 
 	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
 		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
 		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
 		device_printf(dev, "%s: failed0\n", __func__);
 		return (-1);
 	}
 	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
 
 	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 		return (-1);
 	}
 	
 	return 0;
 }
 
 int
 ql_set_promisc(qla_host_t *ha)
 {
 	int ret;
 
 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
 	return (ret);
 }
 
 void
 qla_reset_promisc(qla_host_t *ha)
 {
 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
 }
 
 int
 ql_set_allmulti(qla_host_t *ha)
 {
 	int ret;
 
 	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
 	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
 	return (ret);
 }
 
 void
 qla_reset_allmulti(qla_host_t *ha)
 {
 	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
 	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
 }
 
 /*
  * Name: ql_set_max_mtu
  * Function:
  *	Sets the maximum transfer unit size for the specified rcv context.
  */
 int
 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
 {
 	device_t		dev;
 	q80_set_max_mtu_t	*max_mtu;
 	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
 	uint32_t		err;
 
 	dev = ha->pci_dev;
 
 	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
 	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
 
 	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
 	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
 	max_mtu->count_version |= Q8_MBX_CMD_VERSION;
 
 	max_mtu->cntxt_id = cntxt_id;
 	max_mtu->mtu = mtu;
 
         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
 		(sizeof (q80_set_max_mtu_t) >> 2),
                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                 device_printf(dev, "%s: failed\n", __func__);
                 return -1;
         }
 
 	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
 
         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
 
         if (err) {
                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
         }
 
 	return 0;
 }
 
 static int
 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
 {
 	device_t		dev;
 	q80_link_event_t	*lnk;
 	q80_link_event_rsp_t	*lnk_rsp;
 	uint32_t		err;
 
 	dev = ha->pci_dev;
 
 	lnk = (q80_link_event_t *)ha->hw.mbox;
 	bzero(lnk, (sizeof (q80_link_event_t)));
 
 	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
 	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
 	lnk->count_version |= Q8_MBX_CMD_VERSION;
 
 	lnk->cntxt_id = cntxt_id;
 	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
 
         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                 device_printf(dev, "%s: failed\n", __func__);
                 return -1;
         }
 
 	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
 
         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
 
         if (err) {
                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
         }
 
 	return 0;
 }
 
 static int
 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
 {
 	device_t		dev;
 	q80_config_fw_lro_t	*fw_lro;
 	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
 	uint32_t		err;
 
 	dev = ha->pci_dev;
 
 	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
 	bzero(fw_lro, sizeof(q80_config_fw_lro_t));
 
 	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
 	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
 	fw_lro->count_version |= Q8_MBX_CMD_VERSION;
 
 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
 	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
 
 	fw_lro->cntxt_id = cntxt_id;
 
 	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
 		(sizeof (q80_config_fw_lro_t) >> 2),
 		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
 		device_printf(dev, "%s: failed\n", __func__);
 		return -1;
 	}
 
 	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
 
 	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 	}
 
 	return 0;
 }
 
 static int
 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
 {
 	device_t                dev;
 	q80_hw_config_t         *hw_config;
 	q80_hw_config_rsp_t     *hw_config_rsp;
 	uint32_t                err;
 
 	dev = ha->pci_dev;
 
 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
 	bzero(hw_config, sizeof (q80_hw_config_t));
 
 	hw_config->opcode = Q8_MBX_HW_CONFIG;
 	hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
 
 	hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
 
 	hw_config->u.set_cam_search_mode.mode = search_mode;
 
 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
 		(sizeof (q80_hw_config_t) >> 2),
 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
 		device_printf(dev, "%s: failed\n", __func__);
 		return -1;
 	}
 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
 
 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 	}
 
 	return 0;
 }
 
 static int
 qla_get_cam_search_mode(qla_host_t *ha)
 {
 	device_t                dev;
 	q80_hw_config_t         *hw_config;
 	q80_hw_config_rsp_t     *hw_config_rsp;
 	uint32_t                err;
 
 	dev = ha->pci_dev;
 
 	hw_config = (q80_hw_config_t *)ha->hw.mbox;
 	bzero(hw_config, sizeof (q80_hw_config_t));
 
 	hw_config->opcode = Q8_MBX_HW_CONFIG;
 	hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
 	hw_config->count_version |= Q8_MBX_CMD_VERSION;
 
 	hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
 
 	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
 		(sizeof (q80_hw_config_t) >> 2),
 		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
 		device_printf(dev, "%s: failed\n", __func__);
 		return -1;
 	}
 	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
 
 	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
 
 	if (err) {
 		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 	} else {
 		device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
 			hw_config_rsp->u.get_cam_search_mode.mode);
 	}
 
 	return 0;
 }
 
 
 
 static void
 qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
 {
 	device_t dev = ha->pci_dev;
 
 	if (i < ha->hw.num_tx_rings) {
 		device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
 			__func__, i, xstat->total_bytes);
 		device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
 			__func__, i, xstat->total_pkts);
 		device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
 			__func__, i, xstat->errors);
 		device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
 			__func__, i, xstat->pkts_dropped);
 		device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
 			__func__, i, xstat->switch_pkts);
 		device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
 			__func__, i, xstat->num_buffers);
 	} else {
 		device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
 			__func__, xstat->total_bytes);
 		device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
 			__func__, xstat->total_pkts);
 		device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
 			__func__, xstat->errors);
 		device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
 			__func__, xstat->pkts_dropped);
 		device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
 			__func__, xstat->switch_pkts);
 		device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
 			__func__, xstat->num_buffers);
 	}
 }
 
 static void
 qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
 {
 	device_t dev = ha->pci_dev;
 
 	device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
 		rstat->total_bytes);
 	device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
 		rstat->total_pkts);
 	device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
 		rstat->lro_pkt_count);
 	device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
 		rstat->sw_pkt_count);
 	device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
 		rstat->ip_chksum_err);
 	device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
 		rstat->pkts_wo_acntxts);
 	device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
 		__func__, rstat->pkts_dropped_no_sds_card);
 	device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
 		__func__, rstat->pkts_dropped_no_sds_host);
 	device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
 		rstat->oversized_pkts);
 	device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
 		__func__, rstat->pkts_dropped_no_rds);
 	device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
 		__func__, rstat->unxpctd_mcast_pkts);
 	device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
 		rstat->re1_fbq_error);
 	device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
 		rstat->invalid_mac_addr);
 	device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
 		rstat->rds_prime_trys);
 	device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
 		rstat->rds_prime_success);
 	device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
 		rstat->lro_flows_added);
 	device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
 		rstat->lro_flows_deleted);
 	device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
 		rstat->lro_flows_active);
 	device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
 		__func__, rstat->pkts_droped_unknown);
 	device_printf(dev, "%s: pkts_cnt_oversized\t\t%" PRIu64 "\n",
 		__func__, rstat->pkts_cnt_oversized);
 }
 
 static void
 qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
 {
 	device_t dev = ha->pci_dev;
 
 	device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
 		mstat->xmt_frames);
 	device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
 		mstat->xmt_bytes);
 	device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
 		mstat->xmt_mcast_pkts);
 	device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
 		mstat->xmt_bcast_pkts);
 	device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
 		mstat->xmt_pause_frames);
 	device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
 		mstat->xmt_cntrl_pkts);
 	device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->xmt_pkt_lt_64bytes);
 	device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->xmt_pkt_lt_127bytes);
 	device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->xmt_pkt_lt_255bytes);
 	device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->xmt_pkt_lt_511bytes);
 	device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->xmt_pkt_lt_1023bytes);
 	device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->xmt_pkt_lt_1518bytes);
 	device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->xmt_pkt_gt_1518bytes);
 
 	device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_frames);
 	device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_bytes);
 	device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_mcast_pkts);
 	device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_bcast_pkts);
 	device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_pause_frames);
 	device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_cntrl_pkts);
 	device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->rcv_pkt_lt_64bytes);
 	device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->rcv_pkt_lt_127bytes);
 	device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->rcv_pkt_lt_255bytes);
 	device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->rcv_pkt_lt_511bytes);
 	device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->rcv_pkt_lt_1023bytes);
 	device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->rcv_pkt_lt_1518bytes);
 	device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
 		__func__, mstat->rcv_pkt_gt_1518bytes);
 
 	device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_len_error);
 	device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_len_small);
 	device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_len_large);
 	device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_jabber);
 	device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
 		mstat->rcv_dropped);
 	device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
 		mstat->fcs_error);
 	device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
 		mstat->align_error);
 }
 
 
 static int
 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
 {
 	device_t		dev;
 	q80_get_stats_t		*stat;
 	q80_get_stats_rsp_t	*stat_rsp;
 	uint32_t		err;
 
 	dev = ha->pci_dev;
 
 	stat = (q80_get_stats_t *)ha->hw.mbox;
 	bzero(stat, (sizeof (q80_get_stats_t)));
 
 	stat->opcode = Q8_MBX_GET_STATS;
 	stat->count_version = 2;
 	stat->count_version |= Q8_MBX_CMD_VERSION;
 
 	stat->cmd = cmd;
 
         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
                 ha->hw.mbox, (rsp_size >> 2), 0)) {
                 device_printf(dev, "%s: failed\n", __func__);
                 return -1;
         }
 
 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
 
         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
 
         if (err) {
                 return -1;
         }
 
 	return 0;
 }
 
 void
 ql_get_stats(qla_host_t *ha)
 {
 	q80_get_stats_rsp_t	*stat_rsp;
 	q80_mac_stats_t		*mstat;
 	q80_xmt_stats_t		*xstat;
 	q80_rcv_stats_t		*rstat;
 	uint32_t		cmd;
 	int			i;
 
 	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
 	/*
 	 * Get MAC Statistics
 	 */
 	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
 
 	cmd |= ((ha->pci_func & 0x1) << 16);
 
 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
 		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
 		qla_mac_stats(ha, mstat);
 	} else {
                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
 			__func__, ha->hw.mbox[0]);
 	}
 	/*
 	 * Get RCV Statistics
 	 */
 	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
 //	cmd |= Q8_GET_STATS_CMD_CLEAR;
 	cmd |= (ha->hw.rcv_cntxt_id << 16);
 
 	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
 		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
 		qla_rcv_stats(ha, rstat);
 	} else {
                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
 			__func__, ha->hw.mbox[0]);
 	}
 	/*
 	 * Get XMT Statistics
 	 */
 	for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
 		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
 //		cmd |= Q8_GET_STATS_CMD_CLEAR;
 		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
 
 		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
 			== 0) {
 			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
 			qla_xmt_stats(ha, xstat, i);
 		} else {
 			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
 				__func__, ha->hw.mbox[0]);
 		}
 	}
 	return;
 }
 
 static void
 qla_get_quick_stats(qla_host_t *ha)
 {
 	q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
 	q80_mac_stats_t         *mstat;
 	q80_xmt_stats_t         *xstat;
 	q80_rcv_stats_t         *rstat;
 	uint32_t                cmd;
 
 	stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
 
 	cmd = Q8_GET_STATS_CMD_TYPE_ALL;
 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
 
 //      cmd |= ((ha->pci_func & 0x3) << 16);
 	cmd |= (0xFFFF << 16);
 
 	if (qla_get_hw_stats(ha, cmd,
 			sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
 
 		mstat = (q80_mac_stats_t *)&stat_rsp->mac;
 		rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
 		xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
 		qla_mac_stats(ha, mstat);
 		qla_rcv_stats(ha, rstat);
 		qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
 	} else {
 		device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
 			__func__, ha->hw.mbox[0]);
 	}
 	return;
 }
 
/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 * Parameters:
 *	mp: first mbuf of the packet to transmit.
 *	tx_cmd: tx descriptor whose LSO fields (mss, opcode, header
 *		offsets/length, multicast flag) are filled in.
 *	hdr: caller-provided buffer that receives a contiguous copy of the
 *		ethernet/IP/TCP headers when they do not all fit in the
 *		first mbuf.
 * Returns: 0 - LSO fields set, headers contiguous in the first mbuf;
 *	1 - LSO fields set but the caller must use the header copy in 'hdr'
 *	    (first mbuf shorter than the full header); -1 - not an LSO
 *	    candidate (non-IP, non-TCP, or IPv4 options present).
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
	uint16_t etype, opcode, offload = 1;
	device_t dev;

	dev = ha->pci_dev;


	eh = mtod(mp, struct ether_vlan_header *);

	/* account for an 802.1Q tag when computing the L3 header offset */
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	hdrlen = 0;

	switch (etype) {
		case ETHERTYPE_IP:

			/* offset of TCP options, assuming no IPv4 options */
			tcp_opt_off = ehdrlen + sizeof(struct ip) +
					sizeof(struct tcphdr);

			/*
			 * If the first mbuf doesn't hold the basic headers,
			 * copy them into 'hdr' and parse from there.
			 */
			if (mp->m_len < tcp_opt_off) {
				m_copydata(mp, 0, tcp_opt_off, hdr);
				ip = (struct ip *)(hdr + ehdrlen);
			} else {
				ip = (struct ip *)(mp->m_data + ehdrlen);
			}

			ip_hlen = ip->ip_hl << 2;
			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

				
			if ((ip->ip_p != IPPROTO_TCP) ||
				(ip_hlen != sizeof (struct ip))){
				/* IP Options are not supported */

				offload = 0;
			} else
				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);

		break;

		case ETHERTYPE_IPV6:

			/* IPv6 header is fixed-size; no options handling */
			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
					sizeof (struct tcphdr);

			if (mp->m_len < tcp_opt_off) {
				m_copydata(mp, 0, tcp_opt_off, hdr);
				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
			} else {
				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
			}

			ip_hlen = sizeof(struct ip6_hdr);
			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;

			if (ip6->ip6_nxt != IPPROTO_TCP) {
				//device_printf(dev, "%s: ipv6\n", __func__);
				offload = 0;
			} else
				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		break;

		default:
			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
			offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	/* full header length including TCP options */
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/*
	 * Complete the header copy in 'hdr': if only the fixed headers were
	 * copied above, append the TCP options; otherwise copy everything.
	 */
        if (mp->m_len < hdrlen) {
                if (mp->m_len < tcp_opt_off) {
                        if (tcp_hlen > sizeof(struct tcphdr)) {
                                m_copydata(mp, tcp_opt_off,
                                        (tcp_hlen - sizeof(struct tcphdr)),
                                        &hdr[tcp_opt_off]);
                        }
                } else {
                        m_copydata(mp, 0, hdrlen, hdr);
                }
        }

	tx_cmd->mss = mp->m_pkthdr.tso_segsz;

	tx_cmd->flags_opcode = opcode ;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->total_hdr_len = hdrlen;

	/* Check for Multicast least significant bit of MSB == 1 */
	if (eh->evl_dhost[0] & 0x01) {
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
	}

	if (mp->m_len < hdrlen) {
		/* NOTE(review): raw debug printf left in -- consider
		 * removing or converting to QL_DPRINT. */
		printf("%d\n", hdrlen);
		return (1);
	}

	return (0);
}
 
 /*
  * Name: qla_tx_chksum
  * Function: Checks if the packet to be transmitted is a candidate for
  *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
  *	Ring Structure are plugged in.
  */
 static int
 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
 	uint32_t *tcp_hdr_off)
 {
 	struct ether_vlan_header *eh;
 	struct ip *ip;
 	struct ip6_hdr *ip6;
 	uint32_t ehdrlen, ip_hlen;
 	uint16_t etype, opcode, offload = 1;
 	device_t dev;
 	uint8_t buf[sizeof(struct ip6_hdr)];
 
 	dev = ha->pci_dev;
 
 	*op_code = 0;
 
 	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
 		return (-1);
 
 	eh = mtod(mp, struct ether_vlan_header *);
 
 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 		etype = ntohs(eh->evl_proto);
 	} else {
 		ehdrlen = ETHER_HDR_LEN;
 		etype = ntohs(eh->evl_encap_proto);
 	}
 
 		
 	switch (etype) {
 		case ETHERTYPE_IP:
 			ip = (struct ip *)(mp->m_data + ehdrlen);
 
 			ip_hlen = sizeof (struct ip);
 
 			if (mp->m_len < (ehdrlen + ip_hlen)) {
 				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
 				ip = (struct ip *)buf;
 			}
 
 			if (ip->ip_p == IPPROTO_TCP)
 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
 			else if (ip->ip_p == IPPROTO_UDP)
 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
 			else {
 				//device_printf(dev, "%s: ipv4\n", __func__);
 				offload = 0;
 			}
 		break;
 
 		case ETHERTYPE_IPV6:
 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
 
 			ip_hlen = sizeof(struct ip6_hdr);
 
 			if (mp->m_len < (ehdrlen + ip_hlen)) {
 				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
 					buf);
 				ip6 = (struct ip6_hdr *)buf;
 			}
 
 			if (ip6->ip6_nxt == IPPROTO_TCP)
 				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
 			else if (ip6->ip6_nxt == IPPROTO_UDP)
 				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
 			else {
 				//device_printf(dev, "%s: ipv6\n", __func__);
 				offload = 0;
 			}
 		break;
 
 		default:
 			offload = 0;
 		break;
 	}
 	if (!offload)
 		return (-1);
 
 	*op_code = opcode;
 	*tcp_hdr_off = (ip_hlen + ehdrlen);
 
 	return (0);
 }
 
 #define QLA_TX_MIN_FREE 2
 /*
  * Name: ql_hw_send
  * Function: Transmits a packet. It first checks if the packet is a
  *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
  *	offload. If either of these creteria are not met, it is transmitted
  *	as a regular ethernet frame.
  */
int
ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
	device_t dev;
	int i, ret;
	uint8_t *src = NULL, *dst = NULL;
	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
	uint32_t op_code = 0;
	uint32_t tcp_hdr_off = 0;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is atleast one empty slot in the tx_ring
	 * tx_ring is considered full when there only one entry available
	 */
	/*
	 * NOTE(review): the ">> 2" hard-codes a divide by 4; it assumes
	 * Q8_TX_CMD_MAX_SEGMENTS == 4 (matching buf1..buf4 below) — confirm
	 * if that constant ever changes.
	 */
        num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (-1);
	}
	eh = mtod(mp, struct ether_vlan_header *);

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

		src = frame_hdr;
		ret = qla_tx_tso(ha, mp, &tso_cmd, src);

		/* ret in {0, 1} means TSO setup succeeded; anything else
		 * aborts the transmit. */
		if (!(ret & ~1)) {
			/* find the additional tx_cmd descriptors required */

			if (mp->m_flags & M_VLANTAG)
				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;

			hdr_len = tso_cmd.total_hdr_len;

			/* first extra descriptor carries the header after
			 * Q8_TX_CMD_TSO_ALIGN bytes of padding */
			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
			bytes = QL_MIN(bytes, hdr_len);

			num_tx_cmds++;
			hdr_len -= bytes;

			/* one more descriptor per sizeof(q80_tx_cmd_t) bytes
			 * of header still to be copied */
			while (hdr_len) {
				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
				hdr_len -= bytes;
				num_tx_cmds++;
			}
			hdr_len = tso_cmd.total_hdr_len;

			/* ret == 0: header is contiguous in the mbuf; copy
			 * straight from the frame instead of frame_hdr */
			if (ret == 0)
				src = (uint8_t *)eh;
		} else 
			return (EINVAL);
	} else {
		/* Non-TSO: see if plain TCP/UDP checksum offload applies. */
		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
	}

	if (iscsi_pdu)
		ha->hw.iscsi_pkt_count++;

	/* Reclaim completed slots if the ring looks too full, then recheck. */
	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		ql_hw_tx_done_locked(ha, txr_idx);
		if (hw->tx_cntxt[txr_idx].txr_free <=
				(num_tx_cmds + QLA_TX_MIN_FREE)) {
        		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];

        if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {

                if (nsegs > ha->hw.max_tx_segs)
                        ha->hw.max_tx_segs = nsegs;

                bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

                if (op_code) {
                        tx_cmd->flags_opcode = op_code;
                        tx_cmd->tcp_hdr_off = tcp_hdr_off;

                } else {
                        tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
                }
	} else {
		/* TSO: first descriptor was pre-built by qla_tx_tso() */
		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
		ha->tx_tso_frames++;
	}

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* VLAN tag already present in the frame itself */
        	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;

		if (iscsi_pdu)
			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;

	} else if (mp->m_flags & M_VLANTAG) {
		/* VLAN tag is out-of-band; have the hardware insert it */

		if (hdr_len) { /* TSO */
			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
		} else
			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;

		ha->hw_vlan_tx_frames++;
		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;

		if (iscsi_pdu) {
			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
		}
	}


        tx_cmd->n_bufs = (uint8_t)nsegs;
        tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
        tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

	c_seg = segs;

	/* Fill up to Q8_TX_CMD_MAX_SEGMENTS DMA segments per descriptor,
	 * chaining additional descriptors until all segments are placed. */
	while (1) {
		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {

			switch (i) {
			case 0:
				tx_cmd->buf1_addr = c_seg->ds_addr;
				tx_cmd->buf1_len = c_seg->ds_len;
				break;

			case 1:
				tx_cmd->buf2_addr = c_seg->ds_addr;
				tx_cmd->buf2_len = c_seg->ds_len;
				break;

			case 2:
				tx_cmd->buf3_addr = c_seg->ds_addr;
				tx_cmd->buf3_len = c_seg->ds_len;
				break;

			case 3:
				tx_cmd->buf4_addr = c_seg->ds_addr;
				tx_cmd->buf4_len = c_seg->ds_len;
				break;
			}

			c_seg++;
			nsegs--;
		}

		/* ring size is a power of two; mask implements wrap-around */
		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
				(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		if (!nsegs)
			break;
		
		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		/* TSO : Copy the header in the following tx cmd descriptors */

		txr_next = hw->tx_cntxt[txr_idx].txr_next;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

		if (mp->m_flags & M_VLANTAG) {
			/* first copy the src/dst MAC addresses */
			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
			dst += (ETHER_ADDR_LEN * 2);
			src += (ETHER_ADDR_LEN * 2);
			
			/* synthesize the VLAN tag the frame itself lacks */
			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
			dst += 2;
			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
			dst += 2;

			/* bytes left in src header */
			hdr_len -= ((ETHER_ADDR_LEN * 2) +
					ETHER_VLAN_ENCAP_LEN);

			/* bytes left in TxCmd Entry */
			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);


			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		} else {
			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
					(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;
		
		/* spill remaining header bytes into whole descriptors */
		while (hdr_len) {
			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

			bcopy(src, tx_cmd, bytes);
			src += bytes;
			hdr_len -= bytes;

			txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
					(NUM_TX_DESCRIPTORS - 1);
			tx_cmd_count++;
		}
	}

	hw->tx_cntxt[txr_idx].txr_free =
		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;

	/* tell the hardware about the new descriptors */
	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
		txr_idx);
       	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));

	return (0);
}
 
 
 
 #define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
 static int
 qla_config_rss_ind_table(qla_host_t *ha)
 {
 	uint32_t i, count;
 	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
 
 
 	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
 		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
 	}
 
 	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
 		i = i + Q8_CONFIG_IND_TBL_SIZE) {
 
 		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
 			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
 		} else {
 			count = Q8_CONFIG_IND_TBL_SIZE;
 		}
 
 		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
 			rss_ind_tbl))
 			return (-1);
 	}
 
 	return (0);
 }
 
/*
 * Initialize a software LRO control block for every status (SDS) ring.
 * Returns 0 on success, -1 if any ring's LRO initialization fails.
 */
static int
qla_config_soft_lro(qla_host_t *ha)
{
        int i;
        qla_hw_t *hw = &ha->hw;
        struct lro_ctrl *lro;

        for (i = 0; i < hw->num_sds_rings; i++) {
                lro = &hw->sds[i].lro;

		bzero(lro, sizeof(struct lro_ctrl));

#if (__FreeBSD_version >= 1100101)
		/* newer API takes the ifp and entry count up front */
                if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
                        device_printf(ha->pci_dev,
				"%s: tcp_lro_init_args [%d] failed\n",
                                __func__, i);
                        return (-1);
                }
#else
                if (tcp_lro_init(lro)) {
                        device_printf(ha->pci_dev,
				"%s: tcp_lro_init [%d] failed\n",
                                __func__, i);
                        return (-1);
                }
#endif /* #if (__FreeBSD_version >= 1100101) */

		/* set ifp unconditionally; older tcp_lro_init() does not */
                lro->ifp = ha->ifp;
        }

        QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
        return (0);
}
 
/*
 * Flush any LRO segments still queued on each status ring so no
 * aggregated packets are lost when the interface is torn down.
 */
static void
qla_drain_soft_lro(qla_host_t *ha)
{
        int i;
        qla_hw_t *hw = &ha->hw;
        struct lro_ctrl *lro;

       	for (i = 0; i < hw->num_sds_rings; i++) {
               	lro = &hw->sds[i].lro;

#if (__FreeBSD_version >= 1100101)
		tcp_lro_flush_all(lro);
#else
		/* older API: walk and flush the active list by hand */
                struct lro_entry *queued;

		while ((!SLIST_EMPTY(&lro->lro_active))) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif /* #if (__FreeBSD_version >= 1100101) */
	}

	return;
}
 
 static void
 qla_free_soft_lro(qla_host_t *ha)
 {
         int i;
         qla_hw_t *hw = &ha->hw;
         struct lro_ctrl *lro;
 
         for (i = 0; i < hw->num_sds_rings; i++) {
                	lro = &hw->sds[i].lro;
 		tcp_lro_free(lro);
 	}
 
 	return;
 }
 
 
 /*
  * Name: ql_del_hw_if
  * Function: Destroys the hardware specific entities corresponding to an
  *	Ethernet Interface
  */
 void
 ql_del_hw_if(qla_host_t *ha)
 {
 	uint32_t i;
 	uint32_t num_msix;
 
 	(void)qla_stop_nic_func(ha);
 
 	qla_del_rcv_cntxt(ha);
 
 	qla_del_xmt_cntxt(ha);
 
 	if (ha->hw.flags.init_intr_cnxt) {
 		for (i = 0; i < ha->hw.num_sds_rings; ) {
 
 			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
 				num_msix = Q8_MAX_INTR_VECTORS;
 			else
 				num_msix = ha->hw.num_sds_rings - i;
 			qla_config_intr_cntxt(ha, i, num_msix, 0);
 
 			i += num_msix;
 		}
 
 		ha->hw.flags.init_intr_cnxt = 0;
 	}
 
 	if (ha->hw.enable_soft_lro) {
 		qla_drain_soft_lro(ha);
 		qla_free_soft_lro(ha);
 	}
 
 	return;
 }
 
 void
 qla_confirm_9kb_enable(qla_host_t *ha)
 {
 	uint32_t supports_9kb = 0;
 
 	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
 
 	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
 	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
 
 	qla_get_nic_partition(ha, &supports_9kb, NULL);
 
 	if (!supports_9kb)
 		ha->hw.enable_9kb = 0;
 
 	return;
 }
 
 /*
  * Name: ql_init_hw_if
  * Function: Creates the hardware specific entities corresponding to an
  *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
  *	corresponding to the interface. Enables LRO if allowed.
  */
int
ql_init_hw_if(qla_host_t *ha)
{
	device_t	dev;
	uint32_t	i;
	uint8_t		bcast_mac[6];
	qla_rdesc_t	*rdesc;
	uint32_t	num_msix;

	dev = ha->pci_dev;

	/* clear the status rings before the firmware starts writing them */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
			ha->hw.dma_buf.sds_ring[i].size);
	}

	/* create interrupt contexts in groups of Q8_MAX_INTR_VECTORS */
	for (i = 0; i < ha->hw.num_sds_rings; ) {

		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
			num_msix = Q8_MAX_INTR_VECTORS;
		else
			num_msix = ha->hw.num_sds_rings - i;

		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {

			/* roll back the groups created so far */
			if (i > 0) {

				num_msix = i;

				for (i = 0; i < num_msix; ) {
					qla_config_intr_cntxt(ha, i,
						Q8_MAX_INTR_VECTORS, 0);
					i += Q8_MAX_INTR_VECTORS;
				}
			}
			return (-1);
		}

		i = i + num_msix;
	}

        ha->hw.flags.init_intr_cnxt = 1;

	/*
	 * Create Receive Context
	 */
	if (qla_init_rcv_cntxt(ha)) {
		return (-1);
	}

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rdesc = &ha->hw.rds[i];
		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
		rdesc->rx_in = 0;
		/* Update the RDS Producer Indices */
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
			rdesc->rx_next);
	}


	/*
	 * Create Transmit Context
	 */
	if (qla_init_xmt_cntxt(ha)) {
		/* undo the receive context created above */
		qla_del_rcv_cntxt(ha);
		return (-1);
	}
	ha->hw.max_tx_segs = 0;

	/* program the unicast MAC address */
	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
		return(-1);

	ha->hw.flags.unicast_mac = 1;

	/* program the broadcast MAC address */
	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

	if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
		return (-1);

	ha->hw.flags.bcast_mac = 1;

	/*
	 * program any cached multicast addresses
	 */
	if (qla_hw_add_all_mcast(ha))
		return (-1);

	if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss_ind_table(ha))
		return (-1);

	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
		return (-1);

	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	/* prefer hardware LRO when enabled; otherwise fall back to soft LRO */
	if (ha->ifp->if_capenable & IFCAP_LRO) {
		if (ha->hw.enable_hw_lro) {
			ha->hw.enable_soft_lro = 0;

			if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
				return (-1);
		} else {
			ha->hw.enable_soft_lro = 1;

			if (qla_config_soft_lro(ha))
				return (-1);
		}
	}

        if (qla_init_nic_func(ha))
                return (-1);

        if (qla_query_fw_dcbx_caps(ha))
                return (-1);

	/* everything is set up; unmask the ring interrupts last */
	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_ENABLE_INTERRUPTS(ha, i);

	return (0);
}
 
 static int
 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
 {
         device_t                dev = ha->pci_dev;
         q80_rq_map_sds_to_rds_t *map_rings;
 	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
         uint32_t                i, err;
         qla_hw_t                *hw = &ha->hw;
 
         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
 
         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
         map_rings->count_version |= Q8_MBX_CMD_VERSION;
 
         map_rings->cntxt_id = hw->rcv_cntxt_id;
         map_rings->num_rings = num_idx;
 
 	for (i = 0; i < num_idx; i++) {
 		map_rings->sds_rds[i].sds_ring = i + start_idx;
 		map_rings->sds_rds[i].rds_ring = i + start_idx;
 	}
 
         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
                 device_printf(dev, "%s: failed0\n", __func__);
                 return (-1);
         }
 
         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
 
         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
 
         if (err) {
                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                 return (-1);
         }
 
         return (0);
 }
 
 /*
  * Name: qla_init_rcv_cntxt
  * Function: Creates the Receive Context.
  */
 static int
 qla_init_rcv_cntxt(qla_host_t *ha)
 {
 	q80_rq_rcv_cntxt_t	*rcntxt;
 	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
 	q80_stat_desc_t		*sdesc;
 	int			i, j;
         qla_hw_t		*hw = &ha->hw;
 	device_t		dev;
 	uint32_t		err;
 	uint32_t		rcntxt_sds_rings;
 	uint32_t		rcntxt_rds_rings;
 	uint32_t		max_idx;
 
 	dev = ha->pci_dev;
 
 	/*
 	 * Create Receive Context
 	 */
 
 	for (i = 0; i < hw->num_sds_rings; i++) {
 		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
 
 		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
 			sdesc->data[0] = 1ULL;
 			sdesc->data[1] = 1ULL;
 		}
 	}
 
 	rcntxt_sds_rings = hw->num_sds_rings;
 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
 		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
 
 	rcntxt_rds_rings = hw->num_rds_rings;
 
 	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
 		rcntxt_rds_rings = MAX_RDS_RING_SETS;
 
 	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
 	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
 
 	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
 	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
 	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
 
 	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
 			Q8_RCV_CNTXT_CAP0_LRO |
 			Q8_RCV_CNTXT_CAP0_HW_LRO |
 			Q8_RCV_CNTXT_CAP0_RSS |
 			Q8_RCV_CNTXT_CAP0_SGL_LRO;
 
 	if (ha->hw.enable_9kb)
 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
 	else
 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
 
 	if (ha->hw.num_rds_rings > 1) {
 		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
 		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
 	} else
 		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
 
 	rcntxt->nsds_rings = rcntxt_sds_rings;
 
 	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
 
 	rcntxt->rcv_vpid = 0;
 
 	for (i = 0; i <  rcntxt_sds_rings; i++) {
 		rcntxt->sds[i].paddr =
 			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
 		rcntxt->sds[i].size =
 			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
 		rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
 		rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
 	}
 
 	for (i = 0; i <  rcntxt_rds_rings; i++) {
 		rcntxt->rds[i].paddr_std =
 			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
 
 		if (ha->hw.enable_9kb)
 			rcntxt->rds[i].std_bsize =
 				qla_host_to_le64(MJUM9BYTES);
 		else
 			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
 
 		rcntxt->rds[i].std_nentries =
 			qla_host_to_le32(NUM_RX_DESCRIPTORS);
 	}
 
         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
 		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
                 device_printf(dev, "%s: failed0\n", __func__);
                 return (-1);
         }
 
         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
 
         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
 
         if (err) {
                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                 return (-1);
         }
 
 	for (i = 0; i <  rcntxt_sds_rings; i++) {
 		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
 	}
 
 	for (i = 0; i <  rcntxt_rds_rings; i++) {
 		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
 	}
 
 	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
 
 	ha->hw.flags.init_rx_cnxt = 1;
 
 	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
 
 		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
 
 			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
 				max_idx = MAX_RCNTXT_SDS_RINGS;
 			else
 				max_idx = hw->num_sds_rings - i;
 
 			err = qla_add_rcv_rings(ha, i, max_idx);
 			if (err)
 				return -1;
 
 			i += max_idx;
 		}
 	}
 
 	if (hw->num_rds_rings > 1) {
 
 		for (i = 0; i < hw->num_rds_rings; ) {
 
 			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
 				max_idx = MAX_SDS_TO_RDS_MAP;
 			else
 				max_idx = hw->num_rds_rings - i;
 
 			err = qla_map_sds_to_rds(ha, i, max_idx);
 			if (err)
 				return -1;
 
 			i += max_idx;
 		}
 	}
 
 	return (0);
 }
 
/*
 * Add nsds additional SDS/RDS ring pairs, starting at sds_idx, to the
 * existing receive context via the ADD_RX_RINGS mailbox command.
 * Returns 0 on success, -1 on mailbox failure or error status.
 */
static int
qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
{
	device_t		dev = ha->pci_dev;
	q80_rq_add_rcv_rings_t	*add_rcv;
	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
	uint32_t		i,j, err;
        qla_hw_t		*hw = &ha->hw;

	/* build the request in the shared mailbox buffer */
	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));

	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
	add_rcv->count_version |= Q8_MBX_CMD_VERSION;

	add_rcv->nrds_sets_rings = nsds | (1 << 5);
	add_rcv->nsds_rings = nsds;
	add_rcv->cntxt_id = hw->rcv_cntxt_id;

	/* status (SDS) ring descriptors; j is the global ring index */
        for (i = 0; i <  nsds; i++) {

		j = i + sds_idx;

                add_rcv->sds[i].paddr =
                        qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);

                add_rcv->sds[i].size =
                        qla_host_to_le32(NUM_STATUS_DESCRIPTORS);

                add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
                add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);

        }

	/* receive (RDS) ring descriptors for the same index window */
        for (i = 0; (i <  nsds); i++) {
                j = i + sds_idx;

                add_rcv->rds[i].paddr_std =
                        qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);

		if (ha->hw.enable_9kb)
			add_rcv->rds[i].std_bsize =
				qla_host_to_le64(MJUM9BYTES);
		else
                	add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);

                add_rcv->rds[i].std_nentries =
                        qla_host_to_le32(NUM_RX_DESCRIPTORS);
        }


        if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
                ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

	/* record the firmware-assigned consumer/producer handles */
	for (i = 0; i < nsds; i++) {
		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
	}

	for (i = 0; i < nsds; i++) {
		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
	}

	return (0);
}
 
 /*
  * Name: qla_del_rcv_cntxt
  * Function: Destroys the Receive Context.
  */
static void
qla_del_rcv_cntxt(qla_host_t *ha)
{
	device_t			dev = ha->pci_dev;
	q80_rcv_cntxt_destroy_t		*rcntxt;
	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
	uint32_t			err;
	uint8_t				bcast_mac[6];

	/* nothing to tear down if the context was never created */
	if (!ha->hw.flags.init_rx_cnxt)
		return;

	/* remove programmed MAC filters first; bail out on any failure */
	if (qla_hw_del_all_mcast(ha))
		return;

	if (ha->hw.flags.bcast_mac) {

		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

		if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
			return;
		ha->hw.flags.bcast_mac = 0;

	}

	if (ha->hw.flags.unicast_mac) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
			return;
		ha->hw.flags.unicast_mac = 0;
	}

	/* build the destroy-RX-context request in the mailbox buffer */
	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));

	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
                ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return;
        }
        rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

	/* log an error status but still mark the context as gone */
        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
        }

	ha->hw.flags.init_rx_cnxt = 0;
	return;
}
 
 /*
  * Name: qla_init_xmt_cntxt
  * Function: Creates the Transmit Context.
  */
static int
qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t		dev;
        qla_hw_t		*hw = &ha->hw;
	q80_rq_tx_cntxt_t	*tcntxt;
	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
	uint32_t		err;
	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
	uint32_t		intr_idx;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	dev = ha->pci_dev;

	/*
	 * Create Transmit Context
	 */
	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));

	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	intr_idx = txr_idx;

#ifdef QL_ENABLE_ISCSI_TLV

	/* with iSCSI TLV the upper half of the TX rings uses traffic
	 * class 1 and shares interrupt vectors with the lower half */
	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
				Q8_TX_CNTXT_CAP0_TC;

	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
		tcntxt->traffic_class = 1;
	}

	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);

#else
	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;

#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	tcntxt->ntx_rings = 1;

	tcntxt->tx_ring[0].paddr =
		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
	tcntxt->tx_ring[0].tx_consumer =
		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);

	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);

	/* reset the driver-side ring state before the context goes live */
	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;

        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_rq_tx_cntxt_t) >> 2),
                ha->hw.mbox,
		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return -1;
        }

	/* record the firmware-assigned producer register and context id */
	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;

	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
		return (-1);

	return (0);
}
 
 
 /*
  * Name: qla_del_xmt_cntxt
  * Function: Destroys the Transmit Context.
  */
 static int
 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
 {
 	device_t			dev = ha->pci_dev;
 	q80_tx_cntxt_destroy_t		*tcntxt;
 	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
 	uint32_t			err;
 
 	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
 	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
 
 	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
 	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
 	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
 
 	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
 
         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
 		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
                 device_printf(dev, "%s: failed0\n", __func__);
                 return (-1);
         }
         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
 
         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
 
         if (err) {
                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 		return (-1);
         }
 
 	return (0);
 }
 static void
 qla_del_xmt_cntxt(qla_host_t *ha)
 {
 	uint32_t i;
 
 	if (!ha->hw.flags.init_tx_cnxt)
 		return;
 
 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
 		if (qla_del_xmt_cntxt_i(ha, i))
 			break;
 	}
 	ha->hw.flags.init_tx_cnxt = 0;
 }
 
 static int
 qla_init_xmt_cntxt(qla_host_t *ha)
 {
 	uint32_t i, j;
 
 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
 		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
 			for (j = 0; j < i; j++)
 				qla_del_xmt_cntxt_i(ha, j);
 			return (-1);
 		}
 	}
 	ha->hw.flags.init_tx_cnxt = 1;
 	return (0);
 }
 
 static int
 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
 {
 	int i, nmcast;
 	uint32_t count = 0;
 	uint8_t *mcast;
 
 	nmcast = ha->hw.nmcast;
 
 	QL_DPRINT2(ha, (ha->pci_dev,
 		"%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
 
 	mcast = ha->hw.mac_addr_arr;
 	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
 
 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
 		if ((ha->hw.mcast[i].addr[0] != 0) || 
 			(ha->hw.mcast[i].addr[1] != 0) ||
 			(ha->hw.mcast[i].addr[2] != 0) ||
 			(ha->hw.mcast[i].addr[3] != 0) ||
 			(ha->hw.mcast[i].addr[4] != 0) ||
 			(ha->hw.mcast[i].addr[5] != 0)) {
 
 			bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
 			mcast = mcast + ETHER_ADDR_LEN;
 			count++;
 			
 			if (count == Q8_MAX_MAC_ADDRS) {
 				if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
 					add_mcast, count)) {
                 			device_printf(ha->pci_dev,
 						"%s: failed\n", __func__);
 					return (-1);
 				}
 
 				count = 0;
 				mcast = ha->hw.mac_addr_arr;
 				memset(mcast, 0,
 					(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
 			}
 
 			nmcast--;
 		}
 	}
 
 	if (count) {
 		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
 			count)) {
                 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
 			return (-1);
 		}
 	}
 	QL_DPRINT2(ha, (ha->pci_dev,
 		"%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
 
 	return 0;
 }
 
 static int
 qla_hw_add_all_mcast(qla_host_t *ha)
 {
 	int ret;
 
 	ret = qla_hw_all_mcast(ha, 1);
 
 	return (ret);
 }
 
 static int
 qla_hw_del_all_mcast(qla_host_t *ha)
 {
 	int ret;
 
 	ret = qla_hw_all_mcast(ha, 0);
 
 	bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
 	ha->hw.nmcast = 0;
 
 	return (ret);
 }
 
 static int
 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
 {
 	int i;
 
 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
 			return (0); /* its been already added */
 	}
 	return (-1);
 }
 
 static int
 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
 {
 	int i;
 
 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
 
 		if ((ha->hw.mcast[i].addr[0] == 0) && 
 			(ha->hw.mcast[i].addr[1] == 0) &&
 			(ha->hw.mcast[i].addr[2] == 0) &&
 			(ha->hw.mcast[i].addr[3] == 0) &&
 			(ha->hw.mcast[i].addr[4] == 0) &&
 			(ha->hw.mcast[i].addr[5] == 0)) {
 
 			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
 			ha->hw.nmcast++;	
 
 			mta = mta + ETHER_ADDR_LEN;
 			nmcast--;
 
 			if (nmcast == 0)
 				break;
 		}
 
 	}
 	return 0;
 }
 
 static int
 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
 {
 	int i;
 
 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
 		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
 
 			ha->hw.mcast[i].addr[0] = 0;
 			ha->hw.mcast[i].addr[1] = 0;
 			ha->hw.mcast[i].addr[2] = 0;
 			ha->hw.mcast[i].addr[3] = 0;
 			ha->hw.mcast[i].addr[4] = 0;
 			ha->hw.mcast[i].addr[5] = 0;
 
 			ha->hw.nmcast--;	
 
 			mta = mta + ETHER_ADDR_LEN;
 			nmcast--;
 
 			if (nmcast == 0)
 				break;
 		}
 	}
 	return 0;
 }
 
 /*
  * Name: ql_hw_set_multi
  * Function: Sets the Multicast Addresses provided by the host O.S into the
  *	hardware (for the given interface)
  */
int
ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
	uint32_t add_mac)
{
	uint8_t *mta = mcast_addr;
	int i;
	int ret = 0;
	uint32_t count = 0;
	uint8_t *mcast;

	/* staging area for up to Q8_MAX_MAC_ADDRS addresses per mbx call */
	mcast = ha->hw.mac_addr_arr;
	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));

	for (i = 0; i < mcnt; i++) {
		/* ignore the all-zero (empty) address */
		if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
			if (add_mac) {
				/* only stage addresses not already cached */
				if (qla_hw_mac_addr_present(ha, mta) != 0) {
					bcopy(mta, mcast, ETHER_ADDR_LEN);
					mcast = mcast + ETHER_ADDR_LEN;
					count++;
				}
			} else {
				/* only stage addresses that ARE cached */
				if (qla_hw_mac_addr_present(ha, mta) == 0) {
					bcopy(mta, mcast, ETHER_ADDR_LEN);
					mcast = mcast + ETHER_ADDR_LEN;
					count++;
				}
			}
		}
		/* batch is full: push to hardware and update the cache */
		if (count == Q8_MAX_MAC_ADDRS) {
			if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
				add_mac, count)) {
                		device_printf(ha->pci_dev, "%s: failed\n",
					__func__);
				return (-1);
			}

			if (add_mac) {
				qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
					count);
			} else {
				qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
					count);
			}

			count = 0;
			mcast = ha->hw.mac_addr_arr;
			memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
		}
			
		mta += Q8_MAC_ADDR_LEN;
	}

	/* flush the final, partial batch */
	if (count) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
			count)) {
                	device_printf(ha->pci_dev, "%s: failed\n", __func__);
			return (-1);
		}
		if (add_mac) {
			qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
		} else {
			qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
		}
	}

	return (ret);
}
 
/*
 * Name: ql_hw_tx_done_locked
 * Function: Handle Transmit Completions
 *
 * Walks the tx ring for txr_idx from the driver's completion index up to
 * the index the hardware last reported (tx_cons), freeing each completed
 * mbuf and its DMA mapping, then returns the reclaimed descriptors to
 * the free pool.  Caller is expected to hold the appropriate lock
 * (function is named *_locked; locking itself is not visible here).
 */
void
ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
{
	qla_tx_buf_t *txb;
        qla_hw_t *hw = &ha->hw;
	uint32_t comp_idx, comp_count = 0;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	/* retrieve index of last entry in tx ring completed */
	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));

	while (comp_idx != hw_tx_cntxt->txr_comp) {

		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];

		/* advance the consumer index, wrapping at the ring size */
		hw_tx_cntxt->txr_comp++;
		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
			hw_tx_cntxt->txr_comp = 0;

		comp_count++;

		if (txb->m_head) {
			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);

			/* release the DMA mapping and the transmitted mbuf */
			bus_dmamap_sync(ha->tx_tag, txb->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ha->tx_tag, txb->map);
			m_freem(txb->m_head);

			txb->m_head = NULL;
		}
	}

	/* return all reclaimed descriptors to the free count */
	hw_tx_cntxt->txr_free += comp_count;
	return;
}
 
 void
 ql_update_link_state(qla_host_t *ha)
 {
 	uint32_t link_state;
 	uint32_t prev_link_state;
 
 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 		ha->hw.link_up = 0;
 		return;
 	}
 	link_state = READ_REG32(ha, Q8_LINK_STATE);
 
 	prev_link_state =  ha->hw.link_up;
 
 	if (ha->pci_func == 0) 
 		ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
 	else
 		ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
 
 	if (prev_link_state !=  ha->hw.link_up) {
 		if (ha->hw.link_up) {
 			if_link_state_change(ha->ifp, LINK_STATE_UP);
 		} else {
 			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
 		}
 	}
 	return;
 }
 
 void
 ql_hw_stop_rcv(qla_host_t *ha)
 {
 	int i, done, count = 100;
 
 	ha->flags.stop_rcv = 1;
 
 	while (count) {
 		done = 1;
 		for (i = 0; i < ha->hw.num_sds_rings; i++) {
 			if (ha->hw.sds[i].rcv_active)
 				done = 0;
 		}
 		if (done)
 			break;
 		else 
 			qla_mdelay(__func__, 10);
 		count--;
 	}
 	if (!count)
 		device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
 
 	return;
 }
 
 int
 ql_hw_check_health(qla_host_t *ha)
 {
 	uint32_t val;
 
 	ha->hw.health_count++;
 
-	if (ha->hw.health_count < 1000)
+	if (ha->hw.health_count < 500)
 		return 0;
 
 	ha->hw.health_count = 0;
 
 	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
 
 	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
 		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
 		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
 			__func__, val);
 		return -1;
 	}
 
 	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
 
 	if ((val != ha->hw.hbeat_value) &&
 		(!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
 		ha->hw.hbeat_value = val;
+		ha->hw.hbeat_failure = 0;
 		return 0;
 	}
-	device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n",
-		__func__, val);
+
+	ha->hw.hbeat_failure++;
+
+	if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
+		return 0;
+	else
+		device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n",
+			__func__, val);
+
 
 	return -1;
 }
 
/*
 * Issue the INIT_NIC_FUNC mailbox command, registering for DCBX-change,
 * SFP-change and IDC async events.
 *
 * Returns -1 only when the mailbox transaction itself fails; a nonzero
 * firmware status is logged but still returns 0.
 * NOTE(review): other mailbox helpers in this file return -1 on a
 * nonzero status — confirm whether the 0 return here is intentional.
 */
static int
qla_init_nic_func(qla_host_t *ha)
{
        device_t                dev;
        q80_init_nic_func_t     *init_nic;
        q80_init_nic_func_rsp_t *init_nic_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        /* build the command in the shared mailbox buffer */
        init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
        bzero(init_nic, sizeof(q80_init_nic_func_t));

        init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
        init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
        init_nic->count_version |= Q8_MBX_CMD_VERSION;

        init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
        init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
        init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;

//qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
        if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
                (sizeof (q80_init_nic_func_t) >> 2),
                ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        /* the response overwrites the command in the same buffer */
        init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
// qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));

        err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}
 
/*
 * Issue the STOP_NIC_FUNC mailbox command, deregistering the DCBX-change
 * and SFP-change async events registered by qla_init_nic_func().
 *
 * Returns -1 only when the mailbox transaction itself fails; a nonzero
 * firmware status is logged but still returns 0 (mirrors
 * qla_init_nic_func).
 */
static int
qla_stop_nic_func(qla_host_t *ha)
{
        device_t                dev;
        q80_stop_nic_func_t     *stop_nic;
        q80_stop_nic_func_rsp_t *stop_nic_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        /* build the command in the shared mailbox buffer */
        stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
        bzero(stop_nic, sizeof(q80_stop_nic_func_t));

        stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
        stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
        stop_nic->count_version |= Q8_MBX_CMD_VERSION;

        stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
        stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;

//qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
        if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
                (sizeof (q80_stop_nic_func_t) >> 2),
                ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        /* the response overwrites the command in the same buffer */
        stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_ t));

        err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}
 
/*
 * Query the firmware's DCBX capabilities via the GET_FW_DCBX_CAPS
 * mailbox command.  The response is only dumped for debugging; no state
 * is stored.  Returns -1 on mailbox failure; a nonzero firmware status
 * is logged but still returns 0.
 */
static int
qla_query_fw_dcbx_caps(qla_host_t *ha)
{
        device_t                        dev;
        q80_query_fw_dcbx_caps_t        *fw_dcbx;
        q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
        uint32_t                        err;

        dev = ha->pci_dev;

        /* build the command in the shared mailbox buffer */
        fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
        bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));

        fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
        fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
        fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;

        ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
        if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
                (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
                ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        /* the response overwrites the command in the same buffer */
        fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
        ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
                sizeof (q80_query_fw_dcbx_caps_rsp_t));

        err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}
 
/*
 * Acknowledge an IDC (Inter-Driver Communication) async event by
 * echoing mailboxes 1-4 back to the firmware, then wait up to
 * 300 x 100ms (30s) for the intermediate-completion flag
 * (ha->hw.imd_compl) to be set.
 *
 * Returns 0 on completion, -1 on mailbox failure, nonzero firmware
 * status, or timeout.
 * NOTE(review): the success path logs the remaining poll count via
 * device_printf — presumably debug output; confirm it is intended.
 */
static int
qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
        uint32_t aen_mb3, uint32_t aen_mb4)
{
        device_t                dev;
        q80_idc_ack_t           *idc_ack;
        q80_idc_ack_rsp_t       *idc_ack_rsp;
        uint32_t                err;
        int                     count = 300;

        dev = ha->pci_dev;

        /* build the command in the shared mailbox buffer */
        idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
        bzero(idc_ack, sizeof(q80_idc_ack_t));

        idc_ack->opcode = Q8_MBX_IDC_ACK;
        idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
        idc_ack->count_version |= Q8_MBX_CMD_VERSION;

        /* echo the AEN payload back to the firmware */
        idc_ack->aen_mb1 = aen_mb1;
        idc_ack->aen_mb2 = aen_mb2;
        idc_ack->aen_mb3 = aen_mb3;
        idc_ack->aen_mb4 = aen_mb4;

        /* cleared here; set asynchronously when the IDC exchange completes */
        ha->hw.imd_compl= 0;

        if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
                (sizeof (q80_idc_ack_t) >> 2),
                ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
                return(-1);
        }

        /* poll for completion, 100ms per iteration */
        while (count && !ha->hw.imd_compl) {
                qla_mdelay(__func__, 100);
                count--;
        }

        if (!count)
                return -1;
        else
                device_printf(dev, "%s: count %d\n", __func__, count);

        return (0);
}
 
/*
 * Program the port configuration bits (pause settings, DCBX enable,
 * standard-pause direction) via the SET_PORT_CONFIG mailbox command.
 *
 * When the firmware answers with Q8_MBX_RSP_IDC_INTRMD_RSP the change
 * completes asynchronously; poll ha->hw.imd_compl for up to
 * 300 x 100ms (30s) before declaring success.
 *
 * Returns 0 on success, -1 on mailbox failure, error status, or timeout.
 */
static int
qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
{
        device_t                dev;
        q80_set_port_cfg_t      *pcfg;
        q80_set_port_cfg_rsp_t  *pfg_rsp;
        uint32_t                err;
        int                     count = 300;

        dev = ha->pci_dev;

        /* build the command in the shared mailbox buffer */
        pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
        bzero(pcfg, sizeof(q80_set_port_cfg_t));

        pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
        pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
        pcfg->count_version |= Q8_MBX_CMD_VERSION;

        pcfg->cfg_bits = cfg_bits;

        device_printf(dev, "%s: cfg_bits"
                " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
                " [0x%x, 0x%x, 0x%x]\n", __func__,
                ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
                ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
                ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));

        /* cleared here; set asynchronously when the change completes */
        ha->hw.imd_compl= 0;

        if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
                (sizeof (q80_set_port_cfg_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);

        /* intermediate response: wait for asynchronous completion */
        if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
                while (count && !ha->hw.imd_compl) {
                        qla_mdelay(__func__, 100);
                        count--;
                }
                if (count) {
                        device_printf(dev, "%s: count %d\n", __func__, count);

                        err = 0;
                }
        }

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
                return(-1);
        }

        return (0);
}
 
 
/*
 * Retrieve the size (in bytes) of the firmware minidump template into
 * *size.
 *
 * When QL_LDFLASH_FW is not defined the driver uses the template that is
 * compiled into it (ql83xx_minidump), so the size is read directly from
 * that template's header and the mailbox query below is unreachable.
 *
 * Returns 0 on success, -1 on mailbox failure or error status.
 */
static int
qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
{
	uint32_t			err;
	device_t			dev = ha->pci_dev;
	q80_config_md_templ_size_t	*md_size;
	q80_config_md_templ_size_rsp_t	*md_size_rsp;

#ifndef QL_LDFLASH_FW

	ql_minidump_template_hdr_t *hdr;

	hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
	*size = hdr->size_of_template;
	return (0);

#endif /* #ifndef QL_LDFLASH_FW */

	/* build the GET_MINIDUMP_TMPLT_SIZE command in the mailbox buffer */
	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
	bzero(md_size, sizeof(q80_config_md_templ_size_t));

	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
	md_size->count_version |= Q8_MBX_CMD_VERSION;

	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {

		device_printf(dev, "%s: failed\n", __func__);

		return (-1);
	}

	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);

        if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return(-1);
        }

	*size = md_size_rsp->templ_size;

	return (0);
}
 
/*
 * Read the current port configuration via the GET_PORT_CONFIG mailbox
 * command and return the raw cfg_bits to the caller; the decoded pause
 * and DCBX settings are also logged.
 *
 * Returns 0 on success (*cfg_bits filled in), -1 on mailbox failure or
 * error status.
 */
static int
qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
{
        device_t                dev;
        q80_get_port_cfg_t      *pcfg;
        q80_get_port_cfg_rsp_t  *pcfg_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        /* build the command in the shared mailbox buffer */
        pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
        bzero(pcfg, sizeof(q80_get_port_cfg_t));

        pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
        pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
        pcfg->count_version |= Q8_MBX_CMD_VERSION;

        if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
                (sizeof (q80_get_port_cfg_t) >> 2),
                ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
                return(-1);
        }

        /* log the decoded configuration for diagnostics */
        device_printf(dev, "%s: [cfg_bits, port type]"
                " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
                " [0x%x, 0x%x, 0x%x]\n", __func__,
                pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
                );

        *cfg_bits = pcfg_rsp->cfg_bits;

        return (0);
}
 
 int
 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
 {
         struct ether_vlan_header        *eh;
         uint16_t                        etype;
         struct ip                       *ip = NULL;
         struct ip6_hdr                  *ip6 = NULL;
         struct tcphdr                   *th = NULL;
         uint32_t                        hdrlen;
         uint32_t                        offset;
         uint8_t                         buf[sizeof(struct ip6_hdr)];
 
         eh = mtod(mp, struct ether_vlan_header *);
 
         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                 etype = ntohs(eh->evl_proto);
         } else {
                 hdrlen = ETHER_HDR_LEN;
                 etype = ntohs(eh->evl_encap_proto);
         }
 
 	if (etype == ETHERTYPE_IP) {
 
 		offset = (hdrlen + sizeof (struct ip));
 
 		if (mp->m_len >= offset) {
                         ip = (struct ip *)(mp->m_data + hdrlen);
 		} else {
 			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
                         ip = (struct ip *)buf;
 		}
 
                 if (ip->ip_p == IPPROTO_TCP) {
 
 			hdrlen += ip->ip_hl << 2;
 			offset = hdrlen + 4;
 	
 			if (mp->m_len >= offset) {
 				th = (struct tcphdr *)(mp->m_data + hdrlen);;
 			} else {
                                 m_copydata(mp, hdrlen, 4, buf);
 				th = (struct tcphdr *)buf;
 			}
                 }
 
 	} else if (etype == ETHERTYPE_IPV6) {
 
 		offset = (hdrlen + sizeof (struct ip6_hdr));
 
 		if (mp->m_len >= offset) {
                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
 		} else {
                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
                         ip6 = (struct ip6_hdr *)buf;
 		}
 
                 if (ip6->ip6_nxt == IPPROTO_TCP) {
 
 			hdrlen += sizeof(struct ip6_hdr);
 			offset = hdrlen + 4;
 
 			if (mp->m_len >= offset) {
 				th = (struct tcphdr *)(mp->m_data + hdrlen);;
 			} else {
 				m_copydata(mp, hdrlen, 4, buf);
 				th = (struct tcphdr *)buf;
 			}
                 }
 	}
 
         if (th != NULL) {
                 if ((th->th_sport == htons(3260)) ||
                         (th->th_dport == htons(3260)))
                         return 0;
         }
         return (-1);
 }
 
 void
 qla_hw_async_event(qla_host_t *ha)
 {
         switch (ha->hw.aen_mb0) {
         case 0x8101:
                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
                         ha->hw.aen_mb3, ha->hw.aen_mb4);
 
                 break;
 
         default:
                 break;
         }
 
         return;
 }
 
 #ifdef QL_LDFLASH_FW
/*
 * Ask the firmware to DMA the minidump template into the previously
 * allocated ha->hw.dma_buf.minidump buffer (GET_MINIDUMP_TMPLT mailbox
 * command).  Only built when QL_LDFLASH_FW is defined.
 *
 * Returns 0 on success, -1 on mailbox failure or error status.
 */
static int
ql_get_minidump_template(qla_host_t *ha)
{
	uint32_t			err;
	device_t			dev = ha->pci_dev;
	q80_config_md_templ_cmd_t	*md_templ;
	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;

	/* build the command in the shared mailbox buffer */
	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));

	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
	md_templ->count_version |= Q8_MBX_CMD_VERSION;

	/* destination DMA buffer for the template */
	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
	md_templ->buff_size = ha->hw.dma_buf.minidump.size;

	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
		(sizeof(q80_config_md_templ_cmd_t) >> 2),
		 ha->hw.mbox,
		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {

		device_printf(dev, "%s: failed\n", __func__);

		return (-1);
	}

	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);

}
 #endif /* #ifdef QL_LDFLASH_FW */
 
 /*
  * Minidump related functionality 
  */
 
 static int ql_parse_template(qla_host_t *ha);
 
 static uint32_t ql_rdcrb(qla_host_t *ha,
 			ql_minidump_entry_rdcrb_t *crb_entry,
 			uint32_t * data_buff);
 
 static uint32_t ql_pollrd(qla_host_t *ha,
 			ql_minidump_entry_pollrd_t *entry,
 			uint32_t * data_buff);
 
 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
 			ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
 			uint32_t *data_buff);
 
 static uint32_t ql_L2Cache(qla_host_t *ha,
 			ql_minidump_entry_cache_t *cacheEntry,
 			uint32_t * data_buff);
 
 static uint32_t ql_L1Cache(qla_host_t *ha,
 			ql_minidump_entry_cache_t *cacheEntry,
 			uint32_t *data_buff);
 
 static uint32_t ql_rdocm(qla_host_t *ha,
 			ql_minidump_entry_rdocm_t *ocmEntry,
 			uint32_t *data_buff);
 
 static uint32_t ql_rdmem(qla_host_t *ha,
 			ql_minidump_entry_rdmem_t *mem_entry,
 			uint32_t *data_buff);
 
 static uint32_t ql_rdrom(qla_host_t *ha,
 			ql_minidump_entry_rdrom_t *romEntry,
 			uint32_t *data_buff);
 
 static uint32_t ql_rdmux(qla_host_t *ha,
 			ql_minidump_entry_mux_t *muxEntry,
 			uint32_t *data_buff);
 
 static uint32_t ql_rdmux2(qla_host_t *ha,
 			ql_minidump_entry_mux2_t *muxEntry,
 			uint32_t *data_buff);
 
 static uint32_t ql_rdqueue(qla_host_t *ha,
 			ql_minidump_entry_queue_t *queueEntry,
 			uint32_t *data_buff);
 
 static uint32_t ql_cntrl(qla_host_t *ha,
 			ql_minidump_template_hdr_t *template_hdr,
 			ql_minidump_entry_cntrl_t *crbEntry);
 
 
 static uint32_t
 ql_minidump_size(qla_host_t *ha)
 {
 	uint32_t i, k;
 	uint32_t size = 0;
 	ql_minidump_template_hdr_t *hdr;
 
 	hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
 
 	i = 0x2;
 
 	for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
 		if (i & ha->hw.mdump_capture_mask)
 			size += hdr->capture_size_array[k];
 		i = i << 1;
 	}
 	return (size);
 }
 
 static void
 ql_free_minidump_buffer(qla_host_t *ha)
 {
 	if (ha->hw.mdump_buffer != NULL) {
 		free(ha->hw.mdump_buffer, M_QLA83XXBUF);
 		ha->hw.mdump_buffer = NULL;
 		ha->hw.mdump_buffer_size = 0;
 	}
 	return;
 }
 
 static int
 ql_alloc_minidump_buffer(qla_host_t *ha)
 {
 	ha->hw.mdump_buffer_size = ql_minidump_size(ha);
 
 	if (!ha->hw.mdump_buffer_size)
 		return (-1);
 
 	ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
 					M_NOWAIT);
 
 	if (ha->hw.mdump_buffer == NULL)
 		return (-1);
 
 	return (0);
 }
 
 static void
 ql_free_minidump_template_buffer(qla_host_t *ha)
 {
 	if (ha->hw.mdump_template != NULL) {
 		free(ha->hw.mdump_template, M_QLA83XXBUF);
 		ha->hw.mdump_template = NULL;
 		ha->hw.mdump_template_size = 0;
 	}
 	return;
 }
 
 static int
 ql_alloc_minidump_template_buffer(qla_host_t *ha)
 {
 	ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
 
 	ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
 					M_QLA83XXBUF, M_NOWAIT);
 
 	if (ha->hw.mdump_template == NULL)
 		return (-1);
 
 	return (0);
 }
 
 static int
 ql_alloc_minidump_buffers(qla_host_t *ha)
 {
 	int ret;
 
 	ret = ql_alloc_minidump_template_buffer(ha);
 
 	if (ret)
 		return (ret);
 
 	ret = ql_alloc_minidump_buffer(ha);
 
 	if (ret)
 		ql_free_minidump_template_buffer(ha);
 
 	return (ret);
 }
 
 
 static uint32_t
 ql_validate_minidump_checksum(qla_host_t *ha)
 {
         uint64_t sum = 0;
 	int count;
 	uint32_t *template_buff;
 
 	count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
 	template_buff = ha->hw.dma_buf.minidump.dma_b;
 
 	while (count-- > 0) {
 		sum += *template_buff++;
 	}
 
 	while (sum >> 32) {
 		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
 	}
 
 	return (~sum);
 }
 
 int
 ql_minidump_init(qla_host_t *ha)
 {
 	int		ret = 0;
 	uint32_t	template_size = 0;
 	device_t	dev = ha->pci_dev;
 
 	/*
 	 * Get Minidump Template Size
  	 */
 	ret = qla_get_minidump_tmplt_size(ha, &template_size);
 
 	if (ret || (template_size == 0)) {
 		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
 			template_size);
 		return (-1);
 	}
 
 	/*
 	 * Allocate Memory for Minidump Template
 	 */
 
 	ha->hw.dma_buf.minidump.alignment = 8;
 	ha->hw.dma_buf.minidump.size = template_size;
 
 #ifdef QL_LDFLASH_FW
 	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
 
 		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
 
 		return (-1);
 	}
 	ha->hw.dma_buf.flags.minidump = 1;
 
 	/*
 	 * Retrieve Minidump Template
 	 */
 	ret = ql_get_minidump_template(ha);
 #else
 	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
 
 #endif /* #ifdef QL_LDFLASH_FW */
 
 	if (ret == 0) {
 
 		ret = ql_validate_minidump_checksum(ha);
 
 		if (ret == 0) {
 
 			ret = ql_alloc_minidump_buffers(ha);
 
 			if (ret == 0)
 		ha->hw.mdump_init = 1;
 			else
 				device_printf(dev,
 					"%s: ql_alloc_minidump_buffers"
 					" failed\n", __func__);
 		} else {
 			device_printf(dev, "%s: ql_validate_minidump_checksum"
 				" failed\n", __func__);
 		}
 	} else {
 		device_printf(dev, "%s: ql_get_minidump_template failed\n",
 			 __func__);
 	}
 
 	if (ret)
 		ql_minidump_free(ha);
 
 	return (ret);
 }
 
 static void
 ql_minidump_free(qla_host_t *ha)
 {
 	ha->hw.mdump_init = 0;
 	if (ha->hw.dma_buf.flags.minidump) {
 		ha->hw.dma_buf.flags.minidump = 0;
 		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
 	}
 
 	ql_free_minidump_template_buffer(ha);
 	ql_free_minidump_buffer(ha);
 
 	return;
 }
 
 void
 ql_minidump(qla_host_t *ha)
 {
 	if (!ha->hw.mdump_init)
 		return;
 
 	if (ha->hw.mdump_done)
 		return;
 
 		ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
 
 	bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
 	bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
 
 	bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
 		ha->hw.mdump_template_size);
 
 	ql_parse_template(ha);
  
 	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
 
 	ha->hw.mdump_done = 1;
 
 	return;
 }
 
 
 /*
  * helper routines
  */
 static void 
 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
 {
 	if (esize != entry->hdr.entry_capture_size) {
 		entry->hdr.entry_capture_size = esize;
 		entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
 	}
 	return;
 }
 
 
/*
 * Walk the minidump template entry list and execute each entry,
 * appending the captured data to ha->hw.mdump_buffer.  Entries whose
 * capture mask does not intersect the driver's mask, or which would
 * overflow the capture buffer, are flagged QL_DBG_SKIPPED_FLAG and
 * skipped.  Sanity: the template must start with a TLHDR entry and
 * contain exactly one RDEND.  Always returns 0.
 *
 * NOTE(review): end_cnt is recorded at the RDEND entry but never read
 * afterwards — confirm whether it was meant to bound the walk.
 */
static int 
ql_parse_template(qla_host_t *ha)
{
	uint32_t num_of_entries, buff_level, e_cnt, esize;
	uint32_t end_cnt, rv = 0;
	char *dump_buff, *dbuff;
	int sane_start = 0, sane_end = 0;
	ql_minidump_template_hdr_t *template_hdr;
	ql_minidump_entry_t *entry;
	uint32_t capture_mask; 
	uint32_t dump_size; 

	/* Setup parameters */
	template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;

	if (template_hdr->entry_type == TLHDR)
		sane_start = 1;
	
	dump_buff = (char *) ha->hw.mdump_buffer;

	num_of_entries = template_hdr->num_of_entries;

	/* first entry follows the header at first_entry_offset */
	entry = (ql_minidump_entry_t *) ((char *)template_hdr 
			+ template_hdr->first_entry_offset );

	/* publish per-function state the template entries may reference */
	template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
		template_hdr->ocm_window_array[ha->pci_func];
	template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;

	capture_mask = ha->hw.mdump_capture_mask;
	dump_size = ha->hw.mdump_buffer_size;

	template_hdr->driver_capture_mask = capture_mask;

	QL_DPRINT80(ha, (ha->pci_dev,
		"%s: sane_start = %d num_of_entries = %d "
		"capture_mask = 0x%x dump_size = %d \n", 
		__func__, sane_start, num_of_entries, capture_mask, dump_size));

	for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {

		/*
		 * If the capture_mask of the entry does not match capture mask
		 * skip the entry after marking the driver_flags indicator.
		 */
		
		if (!(entry->hdr.entry_capture_mask & capture_mask)) {

			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
			entry = (ql_minidump_entry_t *) ((char *) entry
					+ entry->hdr.entry_size);
			continue;
		}

		/*
		 * This is ONLY needed in implementations where
		 * the capture buffer allocated is too small to capture
		 * all of the required entries for a given capture mask.
		 * We need to empty the buffer contents to a file
		 * if possible, before processing the next entry
		 * If the buff_full_flag is set, no further capture will happen
		 * and all remaining non-control entries will be skipped.
		 */
		if (entry->hdr.entry_capture_size != 0) {
			if ((buff_level + entry->hdr.entry_capture_size) >
				dump_size) {
				/*  Try to recover by emptying buffer to file */
				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
				entry = (ql_minidump_entry_t *) ((char *) entry
						+ entry->hdr.entry_size);
				continue;
			}
		}

		/*
		 * Decode the entry type and process it accordingly
		 */

		switch (entry->hdr.entry_type) {
		case RDNOP:
			break;

		case RDEND:
			/* remember where the first RDEND was seen */
			if (sane_end == 0) {
				end_cnt = e_cnt;
			}
			sane_end++;
			break;

		case RDCRB:
			dbuff = dump_buff + buff_level;
			esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

                case POLLRD:
                        dbuff = dump_buff + buff_level;
                        esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case POLLRDMWR:
                        dbuff = dump_buff + buff_level;
                        esize = ql_pollrd_modify_write(ha, (void *)entry,
					(void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

		case L2ITG:
		case L2DTG:
		case L2DAT:
		case L2INS:
			dbuff = dump_buff + buff_level;
			esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
			/* ql_L2Cache returns -1 on poll timeout; skip entry */
			if (esize == -1) {
				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
			} else {
				ql_entry_err_chk(entry, esize);
				buff_level += esize;
			}
			break;

		case L1DAT:
		case L1INS:
			dbuff = dump_buff + buff_level;
			esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case RDOCM:
			dbuff = dump_buff + buff_level;
			esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case RDMEM:
			dbuff = dump_buff + buff_level;
			esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case BOARD:
		case RDROM:
			dbuff = dump_buff + buff_level;
			esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case RDMUX:
			dbuff = dump_buff + buff_level;
			esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

                case RDMUX2:
                        dbuff = dump_buff + buff_level;
                        esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

		case QUEUE:
			dbuff = dump_buff + buff_level;
			esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case CNTRL:
			/* control entries capture no data */
			if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
			}
			break;
		default:
			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
			break;
		}
		/*  next entry in the template */
		entry = (ql_minidump_entry_t *) ((char *) entry
						+ entry->hdr.entry_size);
	}

	if (!sane_start || (sane_end > 1)) {
		device_printf(ha->pci_dev,
			"\n%s: Template configuration error. Check Template\n",
			__func__);
	}
	
	QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
		__func__, template_hdr->num_of_entries));

	return 0;
}
 
 /*
  * Read CRB operation.
  */
 static uint32_t
 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
 	uint32_t * data_buff)
 {
 	int loop_cnt;
 	int ret;
 	uint32_t op_count, addr, stride, value = 0;
 
 	addr = crb_entry->addr;
 	op_count = crb_entry->op_count;
 	stride = crb_entry->addr_stride;
 
 	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
 
 		ret = ql_rdwr_indreg32(ha, addr, &value, 1);
 
 		if (ret)
 			return (0);
 
 		*data_buff++ = addr;
 		*data_buff++ = value;
 		addr = addr + stride;
 	}
 
 	/*
 	 * for testing purpose we return amount of data written
 	 */
 	return (op_count * (2 * sizeof(uint32_t)));
 }
 
 /*
  * Handle L2 Cache.
  */
 
/*
 * L2 cache template entry: for each of op_count tag values, write the
 * tag register, optionally trigger the dump via the control register,
 * optionally poll the control register until the poll-mask bits clear
 * (bounded by poll_wait, 1ms per step), then read read_addr_cnt words
 * starting at read_addr.
 *
 * Returns the number of bytes captured, 0 when any register access
 * fails, or (uint32_t)-1 on a poll timeout (caller skips the entry).
 */
static uint32_t 
ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
	uint32_t * data_buff)
{
	int i, k;
	int loop_cnt;
	int ret;

	uint32_t read_value;
	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
	uint32_t tag_value, read_cnt;
	volatile uint8_t cntl_value_r;
	long timeout;
	uint32_t data;

	loop_cnt = cacheEntry->op_count;

	read_addr = cacheEntry->read_addr;
	cntrl_addr = cacheEntry->control_addr;
	cntl_value_w = (uint32_t) cacheEntry->write_value;

	tag_reg_addr = cacheEntry->tag_reg_addr;

	tag_value = cacheEntry->init_tag_value;
	read_cnt = cacheEntry->read_addr_cnt;

	for (i = 0; i < loop_cnt; i++) {

		/* select the cache line via the tag register */
		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
		if (ret)
			return (0);

		/* optionally kick the dump via the control register */
		if (cacheEntry->write_value != 0) { 

			ret = ql_rdwr_indreg32(ha, cntrl_addr,
					&cntl_value_w, 0);
			if (ret)
				return (0);
		}

		/* optionally wait for the poll-mask bits to clear */
		if (cacheEntry->poll_mask != 0) { 

			timeout = cacheEntry->poll_wait;

			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
			if (ret)
				return (0);

			cntl_value_r = (uint8_t)data;

			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {

				if (timeout) {
					qla_mdelay(__func__, 1);
					timeout--;
				} else
					break;

				ret = ql_rdwr_indreg32(ha, cntrl_addr,
						&data, 1);
				if (ret)
					return (0);

				cntl_value_r = (uint8_t)data;
			}
			if (!timeout) {
				/* Report timeout error. 
				 * core dump capture failed
				 * Skip remaining entries.
				 * Write buffer out to file
				 * Use driver specific fields in template header
				 * to report this error.
				 */
				return (-1);
			}
		}

		/* read the cache-line data words */
		addr = read_addr;
		for (k = 0; k < read_cnt; k++) {

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			*data_buff++ = read_value;
			addr += cacheEntry->read_addr_stride;
		}

		tag_value += cacheEntry->tag_value_stride;
	}

	return (read_cnt * loop_cnt * sizeof(uint32_t));
}
 
 /*
  * Handle L1 Cache.
  */
 
 /*
  * ql_L1Cache
  *	Capture L1 cache contents for the minidump.  For each of
  *	op_count tag values: write the tag to the tag register, write
  *	the configured control value, then read read_addr_cnt words
  *	starting at read_addr (stepping by read_addr_stride) into the
  *	dump buffer.
  *
  *	Returns the number of bytes written to data_buff, or 0 if any
  *	indirect register access failed.
  */
 static uint32_t 
 ql_L1Cache(qla_host_t *ha,
 	ql_minidump_entry_cache_t *cacheEntry,
 	uint32_t *data_buff)
 {
 	uint32_t tag = cacheEntry->init_tag_value;
 	uint32_t cntl_val = (uint32_t) cacheEntry->write_value;
 	uint32_t n_reads = cacheEntry->read_addr_cnt;
 	uint32_t raddr;
 	int set, word;
 
 	for (set = 0; set < cacheEntry->op_count; set++) {
 		/* select the cache line/set by writing the current tag */
 		if (ql_rdwr_indreg32(ha, cacheEntry->tag_reg_addr, &tag, 0))
 			return (0);
 
 		/* write the configured control value for this tag */
 		if (ql_rdwr_indreg32(ha, cacheEntry->control_addr,
 			&cntl_val, 0))
 			return (0);
 
 		raddr = cacheEntry->read_addr;
 		for (word = 0; word < n_reads; word++) {
 			uint32_t data;
 
 			if (ql_rdwr_indreg32(ha, raddr, &data, 1))
 				return (0);
 
 			*data_buff++ = data;
 			raddr += cacheEntry->read_addr_stride;
 		}
 
 		tag += cacheEntry->tag_value_stride;
 	}
 
 	/* amount of data written into the dump buffer */
 	return (n_reads * cacheEntry->op_count * sizeof(uint32_t));
 }
 
 /*
  * Reading OCM memory
  */
 
 /*
  * ql_rdocm
  *	Dump on-chip memory (OCM): read op_count 32-bit words via
  *	READ_REG32 starting at read_addr, stepping by read_addr_stride,
  *	storing each word in the dump buffer.
  *
  *	Returns the number of bytes written to data_buff.
  */
 static uint32_t 
 ql_rdocm(qla_host_t *ha,
 	ql_minidump_entry_rdocm_t *ocmEntry,
 	uint32_t *data_buff)
 {
 	volatile uint32_t cur_addr = ocmEntry->read_addr;
 	volatile uint32_t word;
 	int n = ocmEntry->op_count;
 	int i;
 
 	for (i = 0; i < n; i++) {
 		word = READ_REG32(ha, cur_addr);
 		*data_buff++ = word;
 		cur_addr += ocmEntry->read_addr_stride;
 	}
 
 	return (n * sizeof(word));
 }
 
 /*
  * Read memory
  */
 
 /*
  * ql_rdmem
  *	Dump off-chip memory: read read_data_size bytes starting at
  *	read_addr in 16-byte (four uint32_t) chunks through
  *	ql_rdwr_offchip_mem(), storing each chunk in the dump buffer.
  *
  *	Returns the number of bytes written to data_buff, or 0 if an
  *	off-chip memory access failed.
  */
 static uint32_t 
 ql_rdmem(qla_host_t *ha,
 	ql_minidump_entry_rdmem_t *mem_entry,
 	uint32_t *data_buff)
 {
 	q80_offchip_mem_val_t val;
 	volatile uint32_t cur_addr = mem_entry->read_addr;
 	int chunk, n_chunks;
 
 	/* each ql_rdwr_offchip_mem() transfer moves 16 bytes */
 	n_chunks = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
 
 	for (chunk = 0; chunk < n_chunks; chunk++) {
 		if (ql_rdwr_offchip_mem(ha, (cur_addr & 0x0ffffffff),
 			&val, 1))
 			return (0);
 
 		*data_buff++ = val.data_lo;
 		*data_buff++ = val.data_hi;
 		*data_buff++ = val.data_ulo;
 		*data_buff++ = val.data_uhi;
 
 		cur_addr += (sizeof(uint32_t) * 4);
 	}
 
 	return (n_chunks * (sizeof(uint32_t) * 4));
 }
 
 /*
  * Read Rom
  */
 
 /*
  * ql_rdrom
  *	Dump flash/ROM contents: read read_data_size bytes starting at
  *	read_addr, one 32-bit word at a time via ql_rd_flash32(),
  *	storing each word in the dump buffer.
  *
  *	Returns the number of bytes written to data_buff, or 0 if a
  *	flash read failed.
  */
 static uint32_t 
 ql_rdrom(qla_host_t *ha,
 	ql_minidump_entry_rdrom_t *romEntry,
 	uint32_t *data_buff)
 {
 	uint32_t flash_addr = romEntry->read_addr;
 	uint32_t word;
 	int i, n_words;
 
 	/* read_data_size is in bytes; convert to 32-bit word count */
 	n_words = romEntry->read_data_size / sizeof(word);
 
 	for (i = 0; i < n_words; i++) {
 		if (ql_rd_flash32(ha, flash_addr, &word) != 0)
 			return (0);
 
 		*data_buff++ = word;
 		flash_addr += sizeof(word);
 	}
 
 	return (n_words * sizeof(word));
 }
 
 /*
  * Read MUX data
  */
 
 /*
  * ql_rdmux
  *	Mux capture: for each of op_count select values (starting at
  *	select_value, stepping by select_value_stride), write the
  *	select value to select_addr, read read_addr, and store the
  *	(select, data) pair in the dump buffer.
  *
  *	Returns the number of bytes written to data_buff, or 0 if an
  *	indirect register access failed.
  */
 static uint32_t 
 ql_rdmux(qla_host_t *ha,
 	ql_minidump_entry_mux_t *muxEntry,
 	uint32_t *data_buff)
 {
 	uint32_t sel = muxEntry->select_value;
 	uint32_t data;
 	int i;
 
 	for (i = 0; i < muxEntry->op_count; i++) {
 		/* program the mux select, then sample the read register */
 		if (ql_rdwr_indreg32(ha, muxEntry->select_addr, &sel, 0))
 			return (0);
 
 		if (ql_rdwr_indreg32(ha, muxEntry->read_addr, &data, 1))
 			return (0);
 
 		*data_buff++ = sel;
 		*data_buff++ = data;
 
 		sel += muxEntry->select_value_stride;
 	}
 
 	return (i * (2 * sizeof(uint32_t)));
 }
 
 /*
  * ql_rdmux2
  *	Two-select mux capture.  Each iteration performs the same
  *	sequence twice (once per select value): write the select value
  *	to select_addr_1, mask it with select_value_mask and write the
  *	result to select_addr_2, read read_addr, and store the
  *	(masked select, data) pair -- four words per iteration.  Both
  *	select values then advance by select_value_stride.
  *
  *	Returns the number of bytes written to data_buff, or 0 if an
  *	indirect register access failed.
  */
 static uint32_t
 ql_rdmux2(qla_host_t *ha,
 	ql_minidump_entry_mux2_t *muxEntry,
 	uint32_t *data_buff)
 {
 	uint32_t sel[2];
 	uint32_t sel_addr_1 = muxEntry->select_addr_1;
 	uint32_t sel_addr_2 = muxEntry->select_addr_2;
 	uint32_t sel_mask = muxEntry->select_value_mask;
 	uint32_t raddr = muxEntry->read_addr;
 	int i, j;
 
 	sel[0] = muxEntry->select_value_1;
 	sel[1] = muxEntry->select_value_2;
 
 	for (i = 0; i < muxEntry->select_value_count; i++) {
 
 		/* identical capture sequence for each of the two selects */
 		for (j = 0; j < 2; j++) {
 			uint32_t masked_sel, data;
 
 			if (ql_rdwr_indreg32(ha, sel_addr_1, &sel[j], 0))
 				return (0);
 
 			masked_sel = sel[j] & sel_mask;
 
 			if (ql_rdwr_indreg32(ha, sel_addr_2, &masked_sel, 0))
 				return (0);
 
 			if (ql_rdwr_indreg32(ha, raddr, &data, 1))
 				return (0);
 
 			*data_buff++ = masked_sel;
 			*data_buff++ = data;
 		}
 
 		sel[0] += muxEntry->select_value_stride;
 		sel[1] += muxEntry->select_value_stride;
 	}
 
 	return (i * (4 * sizeof(uint32_t)));
 }
 
 /*
  * Handling Queue State Reads.
  */
 
 /*
  * ql_rdqueue
  *	Capture per-queue state: for each of op_count queues (queue id
  *	starting at 0, stepping by queue_id_stride), write the queue id
  *	to select_addr, then read read_addr_cnt words starting at
  *	read_addr (stepping by read_addr_stride) into the dump buffer.
  *
  *	Returns the number of bytes written to data_buff, or 0 if an
  *	indirect register access failed.
  */
 static uint32_t 
 ql_rdqueue(qla_host_t *ha,
 	ql_minidump_entry_queue_t *queueEntry,
 	uint32_t *data_buff)
 {
 	uint32_t qid = 0;
 	uint32_t n_reads = queueEntry->read_addr_cnt;
 	uint32_t raddr, data;
 	int q, w;
 
 	for (q = 0; q < queueEntry->op_count; q++) {
 		/* select the queue whose state is to be read */
 		if (ql_rdwr_indreg32(ha, queueEntry->select_addr, &qid, 0))
 			return (0);
 
 		raddr = queueEntry->read_addr;
 		for (w = 0; w < n_reads; w++) {
 			if (ql_rdwr_indreg32(ha, raddr, &data, 1))
 				return (0);
 
 			*data_buff++ = data;
 			raddr += queueEntry->read_addr_stride;
 		}
 
 		qid += queueEntry->queue_id_stride;
 	}
 
 	return (q * (n_reads * sizeof(uint32_t)));
 }
 
 /*
  * Handling control entries.
  */
 
 /*
  * Execute a minidump control (CNTRL) template entry: a small opcode
  * program applied to op_count registers starting at crbEntry->addr,
  * advancing by addr_stride each iteration.  The opcode bits are
  * processed in a fixed order (WR, RW, AND [with optional nested OR],
  * OR, POLL, RDSTATE, WRSTATE, MDSTATE), each bit being cleared once
  * handled.  Intermediate values can be saved to / taken from
  * template_hdr->saved_state_array.
  *
  * Returns 0 normally, or (uint32_t)-1 if a POLL opcode timed out.
  * NOTE(review): register access failures also return 0, which the
  * caller treats as success (the entry is not flagged as skipped) --
  * confirm this is intentional.
  */
 static uint32_t 
 ql_cntrl(qla_host_t *ha,
 	ql_minidump_template_hdr_t *template_hdr,
 	ql_minidump_entry_cntrl_t *crbEntry)
 {
 	int ret;
 	int count;
 	uint32_t opcode, read_value, addr, entry_addr;
 	long timeout;
 
 	entry_addr = crbEntry->addr;
 
 	for (count = 0; count < crbEntry->op_count; count++) {
 		/* re-read the full opcode mask for every register */
 		opcode = crbEntry->opcode;
 
 		/* WR: write value_1 to the current register */
 		if (opcode & QL_DBG_OPCODE_WR) {
 
                 	ret = ql_rdwr_indreg32(ha, entry_addr,
 					&crbEntry->value_1, 0);
 			if (ret)
 				return (0);
 
 			opcode &= ~QL_DBG_OPCODE_WR;
 		}
 
 		/* RW: read the register and write the value straight back */
 		if (opcode & QL_DBG_OPCODE_RW) {
 
                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
 			if (ret)
 				return (0);
 
                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
 			if (ret)
 				return (0);
 
 			opcode &= ~QL_DBG_OPCODE_RW;
 		}
 
 		/*
 		 * AND: read, mask with value_2 (OR-ing in value_3 when the
 		 * OR bit is also set), and write the result back.  The OR
 		 * bit is consumed here so the standalone OR case below is
 		 * skipped.
 		 */
 		if (opcode & QL_DBG_OPCODE_AND) {
 
                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
 			if (ret)
 				return (0);
 
 			read_value &= crbEntry->value_2;
 			opcode &= ~QL_DBG_OPCODE_AND;
 
 			if (opcode & QL_DBG_OPCODE_OR) {
 				read_value |= crbEntry->value_3;
 				opcode &= ~QL_DBG_OPCODE_OR;
 			}
 
                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
 			if (ret)
 				return (0);
 		}
 
 		/* OR (without AND): read, OR in value_3, write back */
 		if (opcode & QL_DBG_OPCODE_OR) {
 
                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
 			if (ret)
 				return (0);
 
 			read_value |= crbEntry->value_3;
 
                 	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
 			if (ret)
 				return (0);
 
 			opcode &= ~QL_DBG_OPCODE_OR;
 		}
 
 		/*
 		 * POLL: wait (up to poll_timeout iterations, ~1ms each via
 		 * qla_mdelay) until (register & value_2) == value_1.
 		 */
 		if (opcode & QL_DBG_OPCODE_POLL) {
 
 			opcode &= ~QL_DBG_OPCODE_POLL;
 			timeout = crbEntry->poll_timeout;
 			addr = entry_addr;
 
                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
 			if (ret)
 				return (0);
 
 			while ((read_value & crbEntry->value_2)
 				!= crbEntry->value_1) {
 
 				if (timeout) {
 					qla_mdelay(__func__, 1);
 					timeout--;
 				} else
 					break;
 
                 		ret = ql_rdwr_indreg32(ha, addr,
 						&read_value, 1);
 				if (ret)
 					return (0);
 			}
 
 			if (!timeout) {
 				/*
 				 * Report timeout error.
 				 * core dump capture failed
 				 * Skip remaining entries.
 				 * Write buffer out to file
 				 * Use driver specific fields in template header
 				 * to report this error.
 				 */
 				return (-1);
 			}
 		}
 
 		/*
 		 * RDSTATE: read a register (address taken from the saved
 		 * state array when state_index_a is set) into the saved
 		 * state array at state_index_v.
 		 */
 		if (opcode & QL_DBG_OPCODE_RDSTATE) {
 			/*
 			 * decide which address to use.
 			 */
 			if (crbEntry->state_index_a) {
 				addr = template_hdr->saved_state_array[
 						crbEntry-> state_index_a];
 			} else {
 				addr = entry_addr;
 			}
 
                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
 			if (ret)
 				return (0);
 
 			template_hdr->saved_state_array[crbEntry->state_index_v]
 					= read_value;
 			opcode &= ~QL_DBG_OPCODE_RDSTATE;
 		}
 
 		/*
 		 * WRSTATE: write a value (taken from the saved state array
 		 * when state_index_v is set, else value_1) to a register
 		 * (address from the saved state array when state_index_a
 		 * is set, else the current entry address).
 		 */
 		if (opcode & QL_DBG_OPCODE_WRSTATE) {
 			/*
 			 * decide which value to use.
 			 */
 			if (crbEntry->state_index_v) {
 				read_value = template_hdr->saved_state_array[
 						crbEntry->state_index_v];
 			} else {
 				read_value = crbEntry->value_1;
 			}
 			/*
 			 * decide which address to use.
 			 */
 			if (crbEntry->state_index_a) {
 				addr = template_hdr->saved_state_array[
 						crbEntry-> state_index_a];
 			} else {
 				addr = entry_addr;
 			}
 
                 	ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
 			if (ret)
 				return (0);
 
 			opcode &= ~QL_DBG_OPCODE_WRSTATE;
 		}
 
 		/*
 		 * MDSTATE: modify a saved state value in place --
 		 * shift, optional AND mask, OR, and increment.
 		 */
 		if (opcode & QL_DBG_OPCODE_MDSTATE) {
 			/*  Read value from saved state using index */
 			read_value = template_hdr->saved_state_array[
 						crbEntry->state_index_v];
 
 			read_value <<= crbEntry->shl; /*Shift left operation */
 			read_value >>= crbEntry->shr; /*Shift right operation */
 
 			if (crbEntry->value_2) {
 				/* check if AND mask is provided */
 				read_value &= crbEntry->value_2;
 			}
 
 			read_value |= crbEntry->value_3; /* OR operation */
 			read_value += crbEntry->value_1; /* increment op */
 
 			/* Write value back to state area. */
 
 			template_hdr->saved_state_array[crbEntry->state_index_v]
 					= read_value;
 			opcode &= ~QL_DBG_OPCODE_MDSTATE;
 		}
 
 		entry_addr += crbEntry->addr_stride;
 	}
 
 	return (0);
 }
 
 /*
  * Handling rd poll entry.
  */
 
 /*
  * ql_pollrd
  *	Poll-and-read capture: for each of op_count select values,
  *	program select_addr with the current select value, poll
  *	select_addr until (value & mask) != 0 (at most `poll`
  *	iterations), then read read_addr and store the (select, data)
  *	pair in the dump buffer.
  *
  *	Returns the number of bytes written to data_buff, or 0 if a
  *	register access failed or the poll timed out.
  */
 static uint32_t 
 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
 	uint32_t *data_buff)
 {
 	uint32_t sel = entry->select_value;
 	uint32_t poll_limit = entry->poll;
 	uint32_t mask = entry->mask;
 	uint32_t spin, data;
 	int i;
 
 	for (i = 0; i < entry->op_count; i++) {
 
 		if (ql_rdwr_indreg32(ha, entry->select_addr, &sel, 0))
 			return (0);
 
 		/* wait for the mask bits to come up before reading */
 		for (spin = 0; spin < poll_limit; spin++) {
 			uint32_t status;
 
 			if (ql_rdwr_indreg32(ha, entry->select_addr,
 				&status, 1))
 				return (0);
 
 			if ((status & mask) != 0)
 				break;
 		}
 
 		if (spin == poll_limit) {
 			device_printf(ha->pci_dev,
 				"%s: Error in processing entry\n", __func__);
 			device_printf(ha->pci_dev,
 				"%s: wait_count <0x%x> poll <0x%x>\n",
 				__func__, spin, poll_limit);
 			return 0;
 		}
 
 		if (ql_rdwr_indreg32(ha, entry->read_addr, &data, 1))
 			return (0);
 
 		*data_buff++ = sel;
 		*data_buff++ = data;
 		sel = sel + entry->select_value_stride;
 	}
 
 	/*
 	 * for testing purpose we return amount of data written
 	 */
 	return (i * (2 * sizeof(uint32_t)));
 }
 
 
 /*
  * Handling rd modify write poll entry.
  */
 
 /*
  * ql_pollrd_modify_write
  *	Minidump read/modify/write-with-poll entry handler:
  *	  1. write value_1 to addr_1 and poll addr_1 until
  *	     (value & mask) != 0, at most `poll` iterations;
  *	  2. read addr_2, AND it with modify_mask, write it back;
  *	  3. write value_2 to addr_1 and poll addr_1 again;
  *	  4. store the (addr_2, data) pair into the dump buffer.
  *
  *	Returns the number of bytes written to data_buff
  *	(2 * sizeof(uint32_t) on success) or 0 on failure, in which
  *	case the caller marks this template entry as skipped.
  */
 static uint32_t 
 ql_pollrd_modify_write(qla_host_t *ha,
 	ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
 	uint32_t *data_buff)
 {
 	int ret;
 	uint32_t addr_1, addr_2, value_1, value_2, data;
 	uint32_t poll, mask, modify_mask;
 	uint32_t wait_count = 0;
 
 	addr_1		= entry->addr_1;
 	addr_2		= entry->addr_2;
 	value_1		= entry->value_1;
 	value_2		= entry->value_2;
 
 	poll		= entry->poll;
 	mask		= entry->mask;
 	modify_mask	= entry->modify_mask;
 
 	ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
 	if (ret)
 		return (0);
 
 	/* wait for the mask bits of addr_1 to come up */
 	while (wait_count < poll) {
 
 		uint32_t temp;
 
 		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
 		if (ret)
 			return (0);
 
 		if ((temp & mask) != 0) {
 			break;
 		}
 		wait_count++;
 	}
 
 	if (wait_count == poll) {
 		device_printf(ha->pci_dev, "%s Error in processing entry\n",
 			__func__);
 		/*
 		 * The poll timed out and nothing was captured; return 0
 		 * bytes written (the failure convention of the other entry
 		 * handlers) so the caller does not advance the dump buffer
 		 * past uninitialized data.
 		 */
 		return (0);
 	}
 
 	/* read/modify/write of addr_2 */
 	ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
 	if (ret)
 		return (0);
 
 	data = (data & modify_mask);
 
 	ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
 	if (ret)
 		return (0);
 
 	ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
 	if (ret)
 		return (0);
 
 	/* Poll again */
 	wait_count = 0;
 	while (wait_count < poll) {
 
 		uint32_t temp;
 
 		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
 		if (ret)
 			return (0);
 
 		if ((temp & mask) != 0) {
 			break;
 		}
 		wait_count++;
 	}
 
 	/*
 	 * The (addr_2, data) pair captured above is stored even if the
 	 * second poll runs out, matching the original behavior.
 	 */
 	*data_buff++ = addr_2;
 	*data_buff++ = data;
 
 	/*
 	 * for testing purpose we return amount of data written
 	 */
 	return (2 * sizeof(uint32_t));
 }
 
 
Index: head/sys/dev/qlxgbe/ql_hw.h
===================================================================
--- head/sys/dev/qlxgbe/ql_hw.h	(revision 321232)
+++ head/sys/dev/qlxgbe/ql_hw.h	(revision 321233)
@@ -1,1754 +1,1755 @@
 /*
  * Copyright (c) 2013-2016 Qlogic Corporation
  * All rights reserved.
  *
  *  Redistribution and use in source and binary forms, with or without
  *  modification, are permitted provided that the following conditions
  *  are met:
  *
  *  1. Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
  *  2. Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
  *
  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  *  POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 /*
  * File: ql_hw.h
  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
  */
 #ifndef _QL_HW_H_
 #define _QL_HW_H_
 
 /*
  * PCIe Registers; Direct Mapped; Offsets from BAR0
  */
 
 /*
  * Register offsets for QLE8030
  */
 
 /*
  * Firmware Mailbox Registers
  *	0 thru 511; offsets 0x800 thru 0xFFC; 32bits each
  */
 #define Q8_FW_MBOX0			0x00000800
 #define Q8_FW_MBOX511			0x00000FFC
 
 /*
  * Host Mailbox Registers
  *	0 thru 511; offsets 0x000 thru 0x7FC; 32bits each
  */
 #define Q8_HOST_MBOX0			0x00000000
 #define Q8_HOST_MBOX511			0x000007FC
 
 #define Q8_MBOX_INT_ENABLE		0x00001000
 #define Q8_MBOX_INT_MASK_MSIX		0x00001200
 #define Q8_MBOX_INT_LEGACY		0x00003010
 
 #define Q8_HOST_MBOX_CNTRL		0x00003038
 #define Q8_FW_MBOX_CNTRL		0x0000303C
 
 #define Q8_PEG_HALT_STATUS1		0x000034A8
 #define Q8_PEG_HALT_STATUS2		0x000034AC
 #define Q8_FIRMWARE_HEARTBEAT		0x000034B0
 
 #define Q8_FLASH_LOCK_ID		0x00003500
 #define Q8_DRIVER_LOCK_ID		0x00003504
 #define Q8_FW_CAPABILITIES		0x00003528
 
 #define Q8_FW_VER_MAJOR			0x00003550
 #define Q8_FW_VER_MINOR			0x00003554
 #define Q8_FW_VER_SUB			0x00003558
 
 #define Q8_BOOTLD_ADDR			0x0000355C
 #define Q8_BOOTLD_SIZE			0x00003560
 
 #define Q8_FW_IMAGE_ADDR		0x00003564
 #define Q8_FW_BUILD_NUMBER		0x00003568
 #define Q8_FW_IMAGE_VALID		0x000035FC
 
 #define Q8_CMDPEG_STATE			0x00003650
 
 #define Q8_LINK_STATE			0x00003698
 #define Q8_LINK_STATE_2			0x0000369C
 
 #define Q8_LINK_SPEED_0			0x000036E0
 #define Q8_LINK_SPEED_1			0x000036E4
 #define Q8_LINK_SPEED_2			0x000036E8
 #define Q8_LINK_SPEED_3			0x000036EC
 
 #define Q8_MAX_LINK_SPEED_0		0x000036F0
 #define Q8_MAX_LINK_SPEED_1		0x000036F4
 #define Q8_MAX_LINK_SPEED_2		0x000036F8
 #define Q8_MAX_LINK_SPEED_3		0x000036FC
 
 #define Q8_ASIC_TEMPERATURE		0x000037B4
 
 /*
  * CRB Window Registers
  *	0 thru 15; offsets 0x3800 thru 0x383C; 32bits each
  */
 #define Q8_CRB_WINDOW_PF0		0x00003800
 #define Q8_CRB_WINDOW_PF15		0x0000383C
 
 #define Q8_FLASH_LOCK			0x00003850
 #define Q8_FLASH_UNLOCK			0x00003854
 
 #define Q8_DRIVER_LOCK			0x00003868
 #define Q8_DRIVER_UNLOCK		0x0000386C
 
 #define Q8_LEGACY_INT_PTR		0x000038C0
 #define Q8_LEGACY_INT_TRIG		0x000038C4
 #define Q8_LEGACY_INT_MASK		0x000038C8
 
 #define Q8_WILD_CARD			0x000038F0
 #define Q8_INFORMANT			0x000038FC
 
 /*
  * Ethernet Interface Specific Registers
  */
 #define Q8_DRIVER_OP_MODE		0x00003570
 #define Q8_API_VERSION			0x0000356C
 #define Q8_NPAR_STATE			0x0000359C
 
 /*
  * End of PCIe Registers; Direct Mapped; Offsets from BAR0
  */
 
 /*
  * Indirect Registers
  */
 #define Q8_LED_DUAL_0			0x28084C80
 #define Q8_LED_SINGLE_0			0x28084C90
 
 #define Q8_LED_DUAL_1			0x28084CA0
 #define Q8_LED_SINGLE_1			0x28084CB0
 
 #define Q8_LED_DUAL_2			0x28084CC0
 #define Q8_LED_SINGLE_2			0x28084CD0
 
 #define Q8_LED_DUAL_3			0x28084CE0
 #define Q8_LED_SINGLE_3			0x28084CF0
 
 #define Q8_GPIO_1			0x28084D00
 #define Q8_GPIO_2			0x28084D10
 #define Q8_GPIO_3			0x28084D20
 #define Q8_GPIO_4			0x28084D40
 #define Q8_GPIO_5			0x28084D50
 #define Q8_GPIO_6			0x28084D60
 #define Q8_GPIO_7			0x42100060
 #define Q8_GPIO_8			0x42100064
 
 #define Q8_FLASH_SPI_STATUS		0x2808E010
 #define Q8_FLASH_SPI_CONTROL		0x2808E014
 
 #define Q8_FLASH_STATUS			0x42100004
 #define Q8_FLASH_CONTROL		0x42110004
 #define Q8_FLASH_ADDRESS		0x42110008
 #define Q8_FLASH_WR_DATA		0x4211000C
 #define Q8_FLASH_RD_DATA		0x42110018
 
 #define Q8_FLASH_DIRECT_WINDOW		0x42110030
 #define Q8_FLASH_DIRECT_DATA		0x42150000
 
 #define Q8_MS_CNTRL			0x41000090
 
 #define Q8_MS_ADDR_LO			0x41000094
 #define Q8_MS_ADDR_HI			0x41000098
 
 #define Q8_MS_WR_DATA_0_31		0x410000A0
 #define Q8_MS_WR_DATA_32_63		0x410000A4
 #define Q8_MS_WR_DATA_64_95		0x410000B0
 #define Q8_MS_WR_DATA_96_127		0x410000B4
 
 #define Q8_MS_RD_DATA_0_31		0x410000A8
 #define Q8_MS_RD_DATA_32_63		0x410000AC
 #define Q8_MS_RD_DATA_64_95		0x410000B8
 #define Q8_MS_RD_DATA_96_127		0x410000BC
 
 #define Q8_CRB_PEG_0			0x3400003c
 #define Q8_CRB_PEG_1			0x3410003c
 #define Q8_CRB_PEG_2			0x3420003c
 #define Q8_CRB_PEG_3			0x3430003c
 #define Q8_CRB_PEG_4			0x34B0003c
 
 /*
  * Macros for reading and writing registers
  */
 
 #if defined(__i386__) || defined(__amd64__)
 #define Q8_MB()    __asm volatile("mfence" ::: "memory")
 #define Q8_WMB()   __asm volatile("sfence" ::: "memory")
 #define Q8_RMB()   __asm volatile("lfence" ::: "memory")
 #else
 #define Q8_MB()
 #define Q8_WMB()
 #define Q8_RMB()
 #endif
 
 #define READ_REG32(ha, reg) bus_read_4((ha->pci_reg), reg)
 
 /*
  * Write a register and read it back to flush the posted write.
  * do/while(0) (instead of a bare block) makes the macro expand to a
  * single statement, so it is safe in unbraced if/else bodies.
  */
 #define WRITE_REG32(ha, reg, val) \
 	do {\
 		bus_write_4((ha->pci_reg), reg, val);\
 		bus_read_4((ha->pci_reg), reg);\
 	} while (0)
 
 #define Q8_NUM_MBOX	512
 
 #define Q8_MAX_NUM_MULTICAST_ADDRS	1022
 #define Q8_MAC_ADDR_LEN			6
 
 /*
  * Firmware Interface
  */
 
 /*
  * Command Response Interface - Commands
  */
 
 #define Q8_MBX_CONFIG_IP_ADDRESS		0x0001
 #define Q8_MBX_CONFIG_INTR			0x0002
 #define Q8_MBX_MAP_INTR_SRC			0x0003
 #define Q8_MBX_MAP_SDS_TO_RDS			0x0006
 #define Q8_MBX_CREATE_RX_CNTXT			0x0007
 #define Q8_MBX_DESTROY_RX_CNTXT			0x0008
 #define Q8_MBX_CREATE_TX_CNTXT			0x0009
 #define Q8_MBX_DESTROY_TX_CNTXT			0x000A
 #define Q8_MBX_ADD_RX_RINGS			0x000B
 #define Q8_MBX_CONFIG_LRO_FLOW			0x000C
 #define Q8_MBX_CONFIG_MAC_LEARNING		0x000D
 #define Q8_MBX_GET_STATS			0x000F
 #define Q8_MBX_GENERATE_INTR			0x0011
 #define Q8_MBX_SET_MAX_MTU			0x0012
 #define Q8_MBX_MAC_ADDR_CNTRL			0x001F
 #define Q8_MBX_GET_PCI_CONFIG			0x0020
 #define Q8_MBX_GET_NIC_PARTITION		0x0021
 #define Q8_MBX_SET_NIC_PARTITION		0x0022
 #define Q8_MBX_QUERY_WOL_CAP			0x002C
 #define Q8_MBX_SET_WOL_CONFIG			0x002D
 #define Q8_MBX_GET_MINIDUMP_TMPLT_SIZE		0x002F
 #define Q8_MBX_GET_MINIDUMP_TMPLT		0x0030
 #define Q8_MBX_GET_FW_DCBX_CAPS			0x0034
 #define Q8_MBX_QUERY_DCBX_SETTINGS		0x0035
 #define Q8_MBX_CONFIG_RSS			0x0041
 #define Q8_MBX_CONFIG_RSS_TABLE			0x0042
 #define Q8_MBX_CONFIG_INTR_COALESCE		0x0043
 #define Q8_MBX_CONFIG_LED			0x0044
 #define Q8_MBX_CONFIG_MAC_ADDR			0x0045
 #define Q8_MBX_CONFIG_STATISTICS		0x0046
 #define Q8_MBX_CONFIG_LOOPBACK			0x0047
 #define Q8_MBX_LINK_EVENT_REQ			0x0048
 #define Q8_MBX_CONFIG_MAC_RX_MODE		0x0049
 #define Q8_MBX_CONFIG_FW_LRO			0x004A
 #define Q8_MBX_HW_CONFIG			0x004C
 #define Q8_MBX_INIT_NIC_FUNC			0x0060
 #define Q8_MBX_STOP_NIC_FUNC			0x0061
 #define Q8_MBX_IDC_REQ				0x0062
 #define Q8_MBX_IDC_ACK				0x0063
 #define Q8_MBX_SET_PORT_CONFIG			0x0066
 #define Q8_MBX_GET_PORT_CONFIG			0x0067
 #define Q8_MBX_GET_LINK_STATUS			0x0068
 
 
 
 /*
  * Mailbox Command Response
  */
 #define Q8_MBX_RSP_SUCCESS			0x0001
 #define Q8_MBX_RSP_RESPONSE_FAILURE		0x0002
 #define Q8_MBX_RSP_NO_CARD_CRB			0x0003
 #define Q8_MBX_RSP_NO_CARD_MEM			0x0004
 #define Q8_MBX_RSP_NO_CARD_RSRC			0x0005
 #define Q8_MBX_RSP_INVALID_ARGS			0x0006
 #define Q8_MBX_RSP_INVALID_ACTION		0x0007
 #define Q8_MBX_RSP_INVALID_STATE		0x0008
 #define Q8_MBX_RSP_NOT_SUPPORTED		0x0009
 #define Q8_MBX_RSP_NOT_PERMITTED		0x000A
 #define Q8_MBX_RSP_NOT_READY			0x000B
 #define Q8_MBX_RSP_DOES_NOT_EXIST		0x000C
 #define Q8_MBX_RSP_ALREADY_EXISTS		0x000D
 #define Q8_MBX_RSP_BAD_SIGNATURE		0x000E
 #define Q8_MBX_RSP_CMD_NOT_IMPLEMENTED		0x000F
 #define Q8_MBX_RSP_CMD_INVALID			0x0010
 #define Q8_MBX_RSP_TIMEOUT			0x0011
 #define Q8_MBX_RSP_CMD_FAILED			0x0012
 #define Q8_MBX_RSP_FATAL_TEMP			0x0013
 #define Q8_MBX_RSP_MAX_EXCEEDED			0x0014
 #define Q8_MBX_RSP_UNSPECIFIED			0x0015
 #define Q8_MBX_RSP_INTR_CREATE_FAILED		0x0017
 #define Q8_MBX_RSP_INTR_DELETE_FAILED		0x0018
 #define Q8_MBX_RSP_INTR_INVALID_OP		0x0019
 #define Q8_MBX_RSP_IDC_INTRMD_RSP		0x001A
 
 #define Q8_MBX_CMD_VERSION	(0x2 << 13)
 #define Q8_MBX_RSP_STATUS(x) (((!(x >> 9)) || ((x >> 9) == 1)) ? 0: (x >> 9))
 /*
  * Configure IP Address
  */
 typedef struct _q80_config_ip_addr {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 
 	uint8_t		cmd;
 #define		Q8_MBX_CONFIG_IP_ADD_IP	0x1
 #define		Q8_MBX_CONFIG_IP_DEL_IP	0x2
 
 	uint8_t		ip_type;
 #define		Q8_MBX_CONFIG_IP_V4	0x0
 #define		Q8_MBX_CONFIG_IP_V6	0x1
 
 	uint16_t	rsrvd;
 	union {
 		struct {
 			uint32_t addr;
 			uint32_t rsrvd[3];
 		} ipv4;
 		uint8_t	ipv6_addr[16];
 	} u;
 } __packed q80_config_ip_addr_t;
 
 typedef struct _q80_config_ip_addr_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_config_ip_addr_rsp_t;
 
 /*
  * Configure Interrupt Command
  */
 typedef struct _q80_intr {
 	uint8_t		cmd_type;
 #define		Q8_MBX_CONFIG_INTR_CREATE	0x1
 #define		Q8_MBX_CONFIG_INTR_DELETE	0x2
 #define		Q8_MBX_CONFIG_INTR_TYPE_LINE	(0x1 << 4)
 #define		Q8_MBX_CONFIG_INTR_TYPE_MSI_X	(0x3 << 4)
 
 	uint8_t		rsrvd;
 	uint16_t	msix_index;
 } __packed q80_intr_t;
 
 #define Q8_MAX_INTR_VECTORS	16
 typedef struct _q80_config_intr {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 	uint8_t		nentries;
 	uint8_t		rsrvd[3];
 	q80_intr_t	intr[Q8_MAX_INTR_VECTORS];
 } __packed q80_config_intr_t;
 
 /*
  * Per-vector response element of q80_config_intr_rsp_t.
  * NOTE(review): unlike the surrounding mailbox structures this one is
  * not declared __packed; its members happen to be naturally aligned
  * (8 bytes, no padding), but confirm the omission is intentional.
  */
 typedef struct _q80_intr_rsp {
 	uint8_t		status;
 	uint8_t		cmd;
 	uint16_t	intr_id;
 	uint32_t	intr_src;
 } q80_intr_rsp_t;
 
 typedef struct _q80_config_intr_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 	uint8_t		nentries;
 	uint8_t		rsrvd[3];
 	q80_intr_rsp_t	intr[Q8_MAX_INTR_VECTORS];
 } __packed q80_config_intr_rsp_t;
 
 /*
  * Configure LRO Flow Command
  */
 typedef struct _q80_config_lro_flow {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 
 	uint8_t		cmd;
 #define Q8_MBX_CONFIG_LRO_FLOW_ADD	0x01
 #define Q8_MBX_CONFIG_LRO_FLOW_DELETE	0x02
 
 	uint8_t		type_ts;
 #define Q8_MBX_CONFIG_LRO_FLOW_IPV4		0x00
 #define Q8_MBX_CONFIG_LRO_FLOW_IPV6		0x01
 #define Q8_MBX_CONFIG_LRO_FLOW_TS_ABSENT	0x00
 #define Q8_MBX_CONFIG_LRO_FLOW_TS_PRESENT	0x02
 
 	uint16_t	rsrvd;
 	union {
 		struct {
 			uint32_t addr;
 			uint32_t rsrvd[3];
 		} ipv4;
 		uint8_t	ipv6_addr[16];
 	} dst;
 	union {
 		struct {
 			uint32_t addr;
 			uint32_t rsrvd[3];
 		} ipv4;
 		uint8_t	ipv6_addr[16];
 	} src;
 	uint16_t	dst_port;
 	uint16_t	src_port;
 } __packed q80_config_lro_flow_t;
 
 typedef struct _q80_config_lro_flow_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_config_lro_flow_rsp_t;
 
 typedef struct _q80_set_max_mtu {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 	uint32_t	cntxt_id;
 	uint32_t	mtu;
 } __packed q80_set_max_mtu_t;
 
 typedef struct _q80_set_max_mtu_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_set_max_mtu_rsp_t;
 
 /*
  * Configure RSS 
  */
 typedef struct _q80_config_rss {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 
 	uint16_t	cntxt_id;
 	uint16_t	rsrvd;
 
 	uint8_t		hash_type;
 #define Q8_MBX_RSS_HASH_TYPE_IPV4_IP		(0x1 << 4)
 #define Q8_MBX_RSS_HASH_TYPE_IPV4_TCP		(0x2 << 4)
 #define Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP	(0x3 << 4)
 #define Q8_MBX_RSS_HASH_TYPE_IPV6_IP		(0x1 << 6)
 #define Q8_MBX_RSS_HASH_TYPE_IPV6_TCP		(0x2 << 6)
 #define Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP	(0x3 << 6)
 
 	uint8_t		flags;
 #define Q8_MBX_RSS_FLAGS_ENABLE_RSS		(0x1)
 #define Q8_MBX_RSS_FLAGS_USE_IND_TABLE		(0x2)
 #define Q8_MBX_RSS_FLAGS_TYPE_CRSS		(0x4)
 
 	uint16_t	indtbl_mask;
 #define Q8_MBX_RSS_INDTBL_MASK			0x7F
 #define Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID	0x8000
 
 	uint32_t	multi_rss;
 #define Q8_MBX_RSS_MULTI_RSS_ENGINE_ASSIGN	BIT_30
 #define Q8_MBX_RSS_USE_MULTI_RSS_ENGINES	BIT_31
 
 	uint64_t	rss_key[5];
 } __packed q80_config_rss_t;
 
 typedef struct _q80_config_rss_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_config_rss_rsp_t;
 
 /*
  * Configure RSS Indirection Table
  */
 #define Q8_RSS_IND_TBL_SIZE	40
 #define Q8_RSS_IND_TBL_MIN_IDX	0
 #define Q8_RSS_IND_TBL_MAX_IDX	127
 
 typedef struct _q80_config_rss_ind_table {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 	uint8_t		start_idx;
 	uint8_t		end_idx;
 	uint16_t 	cntxt_id;
 	uint8_t		ind_table[Q8_RSS_IND_TBL_SIZE];
 } __packed q80_config_rss_ind_table_t;
 
 typedef struct _q80_config_rss_ind_table_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_config_rss_ind_table_rsp_t;
 
 /*
  * Configure Interrupt Coalescing and Generation
  */
 typedef struct _q80_config_intr_coalesc {
 	uint16_t	opcode;
 	uint16_t 	count_version;
         uint16_t	flags;
 #define Q8_MBX_INTRC_FLAGS_RCV		1
 #define Q8_MBX_INTRC_FLAGS_XMT		2
 #define Q8_MBX_INTRC_FLAGS_PERIODIC	(1 << 3)
 
         uint16_t	cntxt_id;
         uint16_t	max_pkts;
         uint16_t	max_mswait;
         uint8_t		timer_type;
 #define Q8_MBX_INTRC_TIMER_NONE			0
 #define Q8_MBX_INTRC_TIMER_SINGLE		1
 #define Q8_MBX_INTRC_TIMER_PERIODIC		2
 
         uint16_t	sds_ring_mask;
 
         uint8_t		rsrvd;
         uint32_t	ms_timeout;
 } __packed q80_config_intr_coalesc_t;
 
 typedef struct _q80_config_intr_coalesc_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_config_intr_coalesc_rsp_t;
 
 /*
  * Configure MAC Address
  */
 #define Q8_ETHER_ADDR_LEN		6
 /* one CAM filter entry: MAC address plus its VLAN TCI */
 typedef struct _q80_mac_addr {
 	uint8_t		addr[Q8_ETHER_ADDR_LEN];
 	uint16_t	vlan_tci;
 } __packed q80_mac_addr_t;
 
 #define Q8_MAX_MAC_ADDRS	64
 
 typedef struct _q80_config_mac_addr {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 	uint8_t		cmd;
 #define Q8_MBX_CMAC_CMD_ADD_MAC_ADDR	1
 #define Q8_MBX_CMAC_CMD_DEL_MAC_ADDR	2
 
 #define Q8_MBX_CMAC_CMD_CAM_BOTH	(0x0 << 6)
 #define Q8_MBX_CMAC_CMD_CAM_INGRESS	(0x1 << 6)
 #define Q8_MBX_CMAC_CMD_CAM_EGRESS	(0x2 << 6)
 
 	uint8_t		nmac_entries;	/* # of valid entries in mac_addr[] */
 	uint16_t	cntxt_id;
 	q80_mac_addr_t	mac_addr[Q8_MAX_MAC_ADDRS];
 } __packed q80_config_mac_addr_t;
 
 typedef struct _q80_config_mac_addr_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 	uint8_t		cmd;
 	uint8_t		nmac_entries;
 	uint16_t	cntxt_id;
 	/* per-entry completion status, parallel to request's mac_addr[] */
 	uint32_t	status[Q8_MAX_MAC_ADDRS];
 } __packed q80_config_mac_addr_rsp_t;
 
 /*
  * Configure MAC Receive Mode
  */
 typedef struct _q80_config_mac_rcv_mode {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 
 	uint8_t		mode;
 #define Q8_MBX_MAC_RCV_PROMISC_ENABLE	0x1
 #define Q8_MBX_MAC_ALL_MULTI_ENABLE	0x2
 
 	uint8_t		rsrvd;
 	uint16_t	cntxt_id;
 } __packed q80_config_mac_rcv_mode_t;
 
 typedef struct _q80_config_mac_rcv_mode_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_config_mac_rcv_mode_rsp_t;
 
 /*
  * Configure Firmware Controlled LRO
  */
 typedef struct _q80_config_fw_lro {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 
 	uint8_t		flags;
 #define Q8_MBX_FW_LRO_IPV4                     0x1
 #define Q8_MBX_FW_LRO_IPV6                     0x2
 #define Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK       0x4
 #define Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK       0x8
 #define Q8_MBX_FW_LRO_LOW_THRESHOLD            0x10
 
 	uint8_t		rsrvd;
 	uint16_t	cntxt_id;
 
 	/* used only when Q8_MBX_FW_LRO_LOW_THRESHOLD is set in flags */
 	uint16_t	low_threshold;
 	uint16_t	rsrvd0;
 } __packed q80_config_fw_lro_t;
 
 typedef struct _q80_config_fw_lro_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_config_fw_lro_rsp_t;
 
 /*
  * Minidump mailbox commands
  */
 typedef struct _q80_config_md_templ_size {
 	uint16_t	opcode;
 	uint16_t	count_version;
 } __packed q80_config_md_templ_size_t;
 
 typedef struct _q80_config_md_templ_size_rsp {
 	uint16_t	opcode;
 	uint16_t	regcnt_status;
 	uint32_t	rsrvd;
 	uint32_t	templ_size;
 	uint32_t	templ_version;
 } __packed q80_config_md_templ_size_rsp_t;
 
 /* retrieve a chunk of the minidump template into a DMA buffer */
 typedef struct _q80_config_md_templ_cmd {
 	uint16_t	opcode;
 	uint16_t	count_version;
 	uint64_t	buf_addr; /* physical address of buffer */
 	uint32_t	buff_size;
 	uint32_t	offset;
 } __packed q80_config_md_templ_cmd_t;
 
 typedef struct _q80_config_md_templ_cmd_rsp {
 	uint16_t	opcode;
 	uint16_t	regcnt_status;
 	uint32_t	rsrvd;
 	uint32_t	templ_size;
 	uint32_t	buff_size;
 	uint32_t	offset;
 } __packed q80_config_md_templ_cmd_rsp_t;
 
 /*
  * Hardware Configuration Commands
  */
 
 typedef struct _q80_hw_config {
        uint16_t        opcode;
        uint16_t        count_version;
 /* mailbox register counts to use per cmd value below */
 #define Q8_HW_CONFIG_SET_MDIO_REG_COUNT                0x06
 #define Q8_HW_CONFIG_GET_MDIO_REG_COUNT                0x05
 #define Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT 0x03
 #define Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT 0x02
 #define Q8_HW_CONFIG_SET_TEMP_THRESHOLD_COUNT  0x03
 #define Q8_HW_CONFIG_GET_TEMP_THRESHOLD_COUNT  0x02
 #define Q8_HW_CONFIG_GET_ECC_COUNTS_COUNT      0x02
 
        uint32_t        cmd;
 #define Q8_HW_CONFIG_SET_MDIO_REG              0x01
 #define Q8_HW_CONFIG_GET_MDIO_REG              0x02
 #define Q8_HW_CONFIG_SET_CAM_SEARCH_MODE       0x03
 #define Q8_HW_CONFIG_GET_CAM_SEARCH_MODE       0x04
 #define Q8_HW_CONFIG_SET_TEMP_THRESHOLD                0x07
 #define Q8_HW_CONFIG_GET_TEMP_THRESHOLD                0x08
 #define Q8_HW_CONFIG_GET_ECC_COUNTS            0x0A
 
        /* payload interpretation depends on cmd above */
        union {
                struct {
                        uint32_t phys_port_number;
                        uint32_t phy_dev_addr;
                        uint32_t reg_addr;
                        uint32_t data;
                } set_mdio;
 
                struct {
                        uint32_t phys_port_number;
                        uint32_t phy_dev_addr;
                        uint32_t reg_addr;
                } get_mdio;
 
                struct {
                        uint32_t mode;
 #define Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL  0x1
 #define Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO      0x2
 
                } set_cam_search_mode;
 
                struct {
                        uint32_t value;
                } set_temp_threshold;
        } u;
 } __packed q80_hw_config_t;
 
 typedef struct _q80_hw_config_rsp {
         uint16_t       opcode;
         uint16_t       regcnt_status;
 
        /* payload interpretation depends on the cmd that was issued */
        union {
                struct {
                        uint32_t value;
                } get_mdio;
 
                struct {
                        uint32_t mode;
                } get_cam_search_mode;
 
                struct {
                        uint32_t temp_warn;
                        uint32_t curr_temp;
                        uint32_t osc_ring_rate;
                        uint32_t core_voltage;
                } get_temp_threshold;
 
                struct {
                        uint32_t ddr_ecc_error_count;
                        uint32_t ocm_ecc_error_count;
                        uint32_t l2_dcache_ecc_error_count;
                        uint32_t l2_icache_ecc_error_count;
                        uint32_t eport_ecc_error_count;
                } get_ecc_counts;
        } u;
 } __packed q80_hw_config_rsp_t;
 
 /*
  * Link Event Request Command
  */
 typedef struct _q80_link_event {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 	uint8_t		cmd;
 #define Q8_LINK_EVENT_CMD_STOP_PERIODIC	0
 #define Q8_LINK_EVENT_CMD_ENABLE_ASYNC	1
 
 	uint8_t		flags;
 #define Q8_LINK_EVENT_FLAGS_SEND_RSP	1
 
 	uint16_t	cntxt_id;
 } __packed q80_link_event_t;
 
 typedef struct _q80_link_event_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 } __packed q80_link_event_rsp_t;
 
 /*
  * Get Statistics Command
  */
 /* receive-side counters as reported by firmware */
 typedef struct _q80_rcv_stats {
 	uint64_t	total_bytes;
 	uint64_t	total_pkts;
 	uint64_t	lro_pkt_count;
 	uint64_t	sw_pkt_count;
 	uint64_t	ip_chksum_err;
 	uint64_t	pkts_wo_acntxts;
 	uint64_t	pkts_dropped_no_sds_card;
 	uint64_t	pkts_dropped_no_sds_host;
 	uint64_t	oversized_pkts;
 	uint64_t	pkts_dropped_no_rds;
 	uint64_t	unxpctd_mcast_pkts;
 	uint64_t	re1_fbq_error;
 	uint64_t	invalid_mac_addr;
 	uint64_t	rds_prime_trys;
 	uint64_t	rds_prime_success;
 	uint64_t	lro_flows_added;
 	uint64_t	lro_flows_deleted;
 	uint64_t	lro_flows_active;
 	uint64_t	pkts_droped_unknown;
 	uint64_t	pkts_cnt_oversized;
 } __packed q80_rcv_stats_t;
 
 /* transmit-side counters as reported by firmware */
 typedef struct _q80_xmt_stats {
 	uint64_t	total_bytes;
 	uint64_t	total_pkts;
 	uint64_t	errors;
 	uint64_t	pkts_dropped;
 	uint64_t	switch_pkts;
 	uint64_t	num_buffers;
 } __packed q80_xmt_stats_t;
 
 /* per-MAC counters (xmt, rcv, error and eswitch sections) */
 typedef struct _q80_mac_stats {
 	uint64_t	xmt_frames;
 	uint64_t	xmt_bytes;
 	uint64_t	xmt_mcast_pkts;
 	uint64_t	xmt_bcast_pkts;
 	uint64_t	xmt_pause_frames;
 	uint64_t	xmt_cntrl_pkts;
 	uint64_t	xmt_pkt_lt_64bytes;
 	uint64_t	xmt_pkt_lt_127bytes;
 	uint64_t	xmt_pkt_lt_255bytes;
 	uint64_t	xmt_pkt_lt_511bytes;
 	uint64_t	xmt_pkt_lt_1023bytes;
 	uint64_t	xmt_pkt_lt_1518bytes;
 	uint64_t	xmt_pkt_gt_1518bytes;
 	uint64_t	rsrvd0[3];
 	uint64_t	rcv_frames;
 	uint64_t	rcv_bytes;
 	uint64_t	rcv_mcast_pkts;
 	uint64_t	rcv_bcast_pkts;
 	uint64_t	rcv_pause_frames;
 	uint64_t	rcv_cntrl_pkts;
 	uint64_t	rcv_pkt_lt_64bytes;
 	uint64_t	rcv_pkt_lt_127bytes;
 	uint64_t	rcv_pkt_lt_255bytes;
 	uint64_t	rcv_pkt_lt_511bytes;
 	uint64_t	rcv_pkt_lt_1023bytes;
 	uint64_t	rcv_pkt_lt_1518bytes;
 	uint64_t	rcv_pkt_gt_1518bytes;
 	uint64_t	rsrvd1[3];
 	uint64_t	rcv_len_error;
 	uint64_t	rcv_len_small;
 	uint64_t	rcv_len_large;
 	uint64_t	rcv_jabber;
 	uint64_t	rcv_dropped;
 	uint64_t	fcs_error;
 	uint64_t	align_error;
 	uint64_t	eswitched_frames;
 	uint64_t	eswitched_bytes;
 	uint64_t	eswitched_mcast_frames;
 	uint64_t	eswitched_bcast_frames;
 	uint64_t	eswitched_ucast_frames;
 	uint64_t	eswitched_err_free_frames;
 	uint64_t	eswitched_err_free_bytes;
 } __packed q80_mac_stats_t;
 
 typedef struct _q80_get_stats {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 
 	/* cmd = direction/clear flags (bits 0-1) | type (bits 2-4) */
 	uint32_t 	cmd;
 #define Q8_GET_STATS_CMD_CLEAR		0x01
 #define Q8_GET_STATS_CMD_RCV		0x00
 #define Q8_GET_STATS_CMD_XMT		0x02
 #define Q8_GET_STATS_CMD_TYPE_CNTXT	0x00
 #define Q8_GET_STATS_CMD_TYPE_MAC	0x04
 #define Q8_GET_STATS_CMD_TYPE_FUNC	0x08
 #define Q8_GET_STATS_CMD_TYPE_VPORT	0x0C
 #define Q8_GET_STATS_CMD_TYPE_ALL      (0x7 << 2)
 
 } __packed q80_get_stats_t;
 
 typedef struct _q80_get_stats_rsp {
         uint16_t	opcode;
         uint16_t	regcnt_status;
 	uint32_t 	cmd;
 	/* which member is valid depends on cmd's type/direction bits */
 	union {
 		q80_rcv_stats_t rcv;
 		q80_xmt_stats_t xmt;
 		q80_mac_stats_t mac;
 	} u;
 } __packed q80_get_stats_rsp_t;
 
 /* response layout when all three stat groups are requested at once */
 typedef struct _q80_get_mac_rcv_xmt_stats_rsp {
 	uint16_t	opcode;
 	uint16_t	regcnt_status;
 	uint32_t	cmd;
 	q80_mac_stats_t mac;
 	q80_rcv_stats_t rcv;
 	q80_xmt_stats_t xmt;
 } __packed q80_get_mac_rcv_xmt_stats_rsp_t;
 
 /*
  * Init NIC Function
  * Used to Register DCBX Configuration Change AEN
  */
 typedef struct _q80_init_nic_func {
         uint16_t        opcode;
         uint16_t        count_version;
 
         uint32_t        options;
 #define Q8_INIT_NIC_REG_IDC_AEN		0x01
 #define Q8_INIT_NIC_REG_DCBX_CHNG_AEN	0x02
 #define Q8_INIT_NIC_REG_SFP_CHNG_AEN	0x04
 
 } __packed q80_init_nic_func_t;
 
 typedef struct _q80_init_nic_func_rsp {
         uint16_t        opcode;
         uint16_t        regcnt_status;
 } __packed q80_init_nic_func_rsp_t;
 
 /*
  * Stop NIC Function
  * Used to DeRegister DCBX Configuration Change AEN
  */
 typedef struct _q80_stop_nic_func {
         uint16_t        opcode;
         uint16_t        count_version;
 
         uint32_t        options;
 #define Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN 0x02
 #define Q8_STOP_NIC_DEREG_SFP_CHNG_AEN	0x04
 
 } __packed q80_stop_nic_func_t;
 
 typedef struct _q80_stop_nic_func_rsp {
         uint16_t        opcode;
         uint16_t        regcnt_status;
 } __packed q80_stop_nic_func_rsp_t;
 
 /*
  * Query Firmware DCBX Capabilities
  */
 typedef struct _q80_query_fw_dcbx_caps {
         uint16_t        opcode;
         uint16_t        count_version;
 } __packed q80_query_fw_dcbx_caps_t;
 
 typedef struct _q80_query_fw_dcbx_caps_rsp {
         uint16_t        opcode;
         uint16_t        regcnt_status;
 
         uint32_t        dcbx_caps;
 #define Q8_QUERY_FW_DCBX_CAPS_TSA               0x00000001
 #define Q8_QUERY_FW_DCBX_CAPS_ETS               0x00000002
 #define Q8_QUERY_FW_DCBX_CAPS_DCBX_CEE_1_01     0x00000004
 #define Q8_QUERY_FW_DCBX_CAPS_DCBX_IEEE_1_0     0x00000008
 #define Q8_QUERY_FW_DCBX_MAX_TC_MASK            0x00F00000
 #define Q8_QUERY_FW_DCBX_MAX_ETS_TC_MASK        0x0F000000
 #define Q8_QUERY_FW_DCBX_MAX_PFC_TC_MASK        0xF0000000
 
 } __packed q80_query_fw_dcbx_caps_rsp_t;
 
 /*
  * IDC Ack Cmd
  * Echoes back the AEN mailbox values being acknowledged.
  */
 
 typedef struct _q80_idc_ack {
 	uint16_t	opcode;
 	uint16_t	count_version;
 
 	uint32_t	aen_mb1;
 	uint32_t	aen_mb2;
 	uint32_t	aen_mb3;
 	uint32_t	aen_mb4;
 
 } __packed q80_idc_ack_t;
 
 typedef struct _q80_idc_ack_rsp {
 	uint16_t	opcode;
 	uint16_t	regcnt_status;
 } __packed q80_idc_ack_rsp_t;
 
 
 /*
  * Set Port Configuration command
  * Used to set Ethernet Standard Pause values
  */
 
 typedef struct _q80_set_port_cfg {
 	uint16_t	opcode;
 	uint16_t	count_version;
 
 	uint32_t	cfg_bits;
 
 #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_MASK	(0x7 << 1)
 #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_NONE	(0x0 << 1)
 #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_HSS	(0x2 << 1)
 #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_PHY	(0x3 << 1)
 #define Q8_PORT_CFG_BITS_LOOPBACK_MODE_EXT	(0x4 << 1)
 
 #define Q8_VALID_LOOPBACK_MODE(mode) \
              (((mode) == Q8_PORT_CFG_BITS_LOOPBACK_MODE_NONE) || \
 		(((mode) >= Q8_PORT_CFG_BITS_LOOPBACK_MODE_HSS) && \
 		 ((mode) <= Q8_PORT_CFG_BITS_LOOPBACK_MODE_EXT)))
 
 #define Q8_PORT_CFG_BITS_DCBX_ENABLE		BIT_4
 
 #define Q8_PORT_CFG_BITS_PAUSE_CFG_MASK		(0x3 << 5)
 #define Q8_PORT_CFG_BITS_PAUSE_DISABLED		(0x0 << 5)
 #define Q8_PORT_CFG_BITS_PAUSE_STD		(0x1 << 5)
 #define Q8_PORT_CFG_BITS_PAUSE_PPM		(0x2 << 5)
 
 #define Q8_PORT_CFG_BITS_LNKCAP_10MB		BIT_8
 #define Q8_PORT_CFG_BITS_LNKCAP_100MB		BIT_9
 #define Q8_PORT_CFG_BITS_LNKCAP_1GB		BIT_10
 #define Q8_PORT_CFG_BITS_LNKCAP_10GB		BIT_11
 
 #define Q8_PORT_CFG_BITS_AUTONEG		BIT_15
 #define Q8_PORT_CFG_BITS_XMT_DISABLE		BIT_17
 #define Q8_PORT_CFG_BITS_FEC_RQSTD		BIT_18
 #define Q8_PORT_CFG_BITS_EEE_RQSTD		BIT_19
 
 #define Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK	(0x3 << 20)
 #define Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV	(0x0 << 20)
 #define Q8_PORT_CFG_BITS_STDPAUSE_XMT		(0x1 << 20)
 #define Q8_PORT_CFG_BITS_STDPAUSE_RCV		(0x2 << 20)
 
 } __packed q80_set_port_cfg_t;
 
 typedef struct _q80_set_port_cfg_rsp {
 	uint16_t	opcode;
 	uint16_t	regcnt_status;
 } __packed q80_set_port_cfg_rsp_t;
 
 /*
  * Get Port Configuration Command
  */
 
 typedef struct _q80_get_port_cfg {
 	uint16_t	opcode;
 	uint16_t	count_version;
 } __packed q80_get_port_cfg_t;
 
 typedef struct _q80_get_port_cfg_rsp {
 	uint16_t	opcode;
 	uint16_t	regcnt_status;
 
 	uint32_t	cfg_bits; /* same as in q80_set_port_cfg_t */
 
 	uint8_t		phys_port_type;
 	uint8_t		rsvd[3];
 } __packed q80_get_port_cfg_rsp_t;
 
 /*
  * Get Link Status Command
  * Used to get current PAUSE values for the port
  */
 
 typedef struct _q80_get_link_status {
         uint16_t        opcode;
         uint16_t        count_version;
 } __packed q80_get_link_status_t;
 
 typedef struct _q80_get_link_status_rsp {
         uint16_t        opcode;
         uint16_t        regcnt_status;
 
 	uint32_t	cfg_bits;
 #define Q8_GET_LINK_STAT_CFG_BITS_LINK_UP		BIT_0
 
 #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_MASK	(0x7 << 3)
 #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_UNKNOWN	(0x0 << 3)
 #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_10MB	(0x1 << 3)
 #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_100MB	(0x2 << 3)
 #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_1GB	(0x3 << 3)
 #define Q8_GET_LINK_STAT_CFG_BITS_LINK_SPEED_10GB	(0x4 << 3)
 
 #define Q8_GET_LINK_STAT_CFG_BITS_PAUSE_CFG_MASK	(0x3 << 6)
 #define Q8_GET_LINK_STAT_CFG_BITS_PAUSE_CFG_DISABLE	(0x0 << 6)
 #define Q8_GET_LINK_STAT_CFG_BITS_PAUSE_CFG_STD		(0x1 << 6)
 #define Q8_GET_LINK_STAT_CFG_BITS_PAUSE_CFG_PPM		(0x2 << 6)
 
 /*
  * NOTE(review): the LOOPBACK mask below covers bits 8-10 but the
  * NONE/HSS/PHY values are shifted by 6 (overlapping PAUSE_CFG above).
  * The values likely should be (x << 8) -- confirm against the 83xx
  * firmware interface spec before relying on these.
  */
 #define Q8_GET_LINK_STAT_CFG_BITS_LOOPBACK_MASK		(0x7 << 8)
 #define Q8_GET_LINK_STAT_CFG_BITS_LOOPBACK_NONE		(0x0 << 6)
 #define Q8_GET_LINK_STAT_CFG_BITS_LOOPBACK_HSS		(0x2 << 6)
 #define Q8_GET_LINK_STAT_CFG_BITS_LOOPBACK_PHY		(0x3 << 6)
 
 #define Q8_GET_LINK_STAT_CFG_BITS_FEC_ENABLED		BIT_12
 #define Q8_GET_LINK_STAT_CFG_BITS_EEE_ENABLED		BIT_13
 
 #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_DIR_MASK	(0x3 << 20)
 #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_NONE		(0x0 << 20)
 #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_XMT		(0x1 << 20)
 #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_RCV		(0x2 << 20)
 #define Q8_GET_LINK_STAT_CFG_BITS_STDPAUSE_XMT_RCV	(0x3 << 20)
 
 	uint32_t	link_state;
 #define Q8_GET_LINK_STAT_LOSS_OF_SIGNAL			BIT_0
 #define Q8_GET_LINK_STAT_PORT_RST_DONE			BIT_3
 #define Q8_GET_LINK_STAT_PHY_LINK_DOWN			BIT_4
 #define Q8_GET_LINK_STAT_PCS_LINK_DOWN			BIT_5
 #define Q8_GET_LINK_STAT_MAC_LOCAL_FAULT		BIT_6
 #define Q8_GET_LINK_STAT_MAC_REMOTE_FAULT		BIT_7
 #define Q8_GET_LINK_STAT_XMT_DISABLED			BIT_9
 #define Q8_GET_LINK_STAT_SFP_XMT_FAULT			BIT_10
 
 	uint32_t	sfp_info;
 #define Q8_GET_LINK_STAT_SFP_TRNCVR_MASK		0x3
 #define Q8_GET_LINK_STAT_SFP_TRNCVR_NOT_EXPECTED	0x0
 #define Q8_GET_LINK_STAT_SFP_TRNCVR_NONE		0x1
 #define Q8_GET_LINK_STAT_SFP_TRNCVR_INVALID		0x2
 #define Q8_GET_LINK_STAT_SFP_TRNCVR_VALID		0x3
 
 #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_MASK		(0x3 << 2)
 #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_UNREC_TRSVR	(0x0 << 2)
 #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_NOT_QLOGIC	(0x1 << 2)
 #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_SPEED_FAILED	(0x2 << 2)
 #define Q8_GET_LINK_STAT_SFP_ADDTL_INFO_ACCESS_ERROR	(0x3 << 2)
 
 #define Q8_GET_LINK_STAT_SFP_MOD_TYPE_MASK		(0x1F << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_NONE			(0x00 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_10GBLRM		(0x01 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_10GBLR			(0x02 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_10GBSR			(0x03 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_10GBC_P		(0x04 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_10GBC_AL		(0x05 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_10GBC_PL		(0x06 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_1GBSX			(0x07 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_1GBLX			(0x08 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_1GBCX			(0x09 << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_1GBT			(0x0A << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_1GBC_PL		(0x0B << 4)
 #define Q8_GET_LINK_STAT_SFP_MOD_UNKNOWN		(0x0F << 4)
 
 #define Q8_GET_LINK_STAT_SFP_MULTI_RATE_MOD		BIT_9
 /* NOTE(review): identical redefinition of the link_state macro above
  * (BIT_10) -- legal, but one of the two should probably be removed */
 #define Q8_GET_LINK_STAT_SFP_XMT_FAULT			BIT_10
 #define Q8_GET_LINK_STAT_SFP_COPPER_CBL_LENGTH_MASK	(0xFF << 16)
 
 } __packed q80_get_link_status_rsp_t;
 
 
 /*
  * Transmit Related Definitions
  */
 /* Max# of TX Rings per Tx Create Cntxt Mbx Cmd*/
 #define MAX_TCNTXT_RINGS           8
 
 /*
  * Transmit Context - Q8_CMD_CREATE_TX_CNTXT Command Configuration Data
  */
 
 typedef struct _q80_rq_tx_ring {
 	uint64_t	paddr;
 	uint64_t	tx_consumer;
 	uint16_t	nentries;
 	uint16_t	intr_id;
 	uint8_t 	intr_src_bit;
 	uint8_t 	rsrvd[3];
 } __packed q80_rq_tx_ring_t;
 
 typedef struct _q80_rq_tx_cntxt {
 	uint16_t		opcode;
 	uint16_t 		count_version;
 
 	uint32_t		cap0;
 #define Q8_TX_CNTXT_CAP0_BASEFW		(1 << 0)
 #define Q8_TX_CNTXT_CAP0_LSO		(1 << 6)
 #define Q8_TX_CNTXT_CAP0_TC		(1 << 25)
 
 	uint32_t		cap1;
 	uint32_t		cap2;
 	uint32_t		cap3;
 	uint8_t			ntx_rings;
 	uint8_t			traffic_class; /* bits 8-10; others reserved */
 	uint16_t		tx_vpid;
 	q80_rq_tx_ring_t	tx_ring[MAX_TCNTXT_RINGS];
 } __packed q80_rq_tx_cntxt_t;
 
 typedef struct _q80_rsp_tx_ring {
 	uint32_t		prod_index;
 	uint16_t		cntxt_id;
 	uint8_t			state;
 	uint8_t			rsrvd;
 } q80_rsp_tx_ring_t;
 
 typedef struct _q80_rsp_tx_cntxt {
         uint16_t                opcode;
         uint16_t                regcnt_status;
 	uint8_t			ntx_rings;
         uint8_t                 phy_port;
         uint8_t                 virt_port;
 	uint8_t                 rsrvd;
 	q80_rsp_tx_ring_t	tx_ring[MAX_TCNTXT_RINGS];
 } __packed q80_rsp_tx_cntxt_t;
 
 typedef struct _q80_tx_cntxt_destroy {
         uint16_t        opcode;
 	uint16_t 	count_version;
         uint32_t        cntxt_id;
 } __packed q80_tx_cntxt_destroy_t;
 
 typedef struct _q80_tx_cntxt_destroy_rsp {
 	uint16_t	opcode;
 	uint16_t	regcnt_status;
 } __packed q80_tx_cntxt_destroy_rsp_t;
 
 /*
  * Transmit Command Descriptor
  * These commands are issued on the Transmit Ring associated with a Transmit
  * context
  */
 typedef struct _q80_tx_cmd {
 	uint8_t		tcp_hdr_off;	/* TCP Header Offset */
 	uint8_t		ip_hdr_off;	/* IP Header Offset */
 	uint16_t	flags_opcode;	/* Bits 0-6: flags; 7-12: opcode */
 
 	/* flags field */
 #define Q8_TX_CMD_FLAGS_MULTICAST	0x01
 #define Q8_TX_CMD_FLAGS_LSO_TSO		0x02
 #define Q8_TX_CMD_FLAGS_VLAN_TAGGED	0x10
 #define Q8_TX_CMD_FLAGS_HW_VLAN_ID	0x40
 
 	/* opcode field */
 #define Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6	(0xC << 7)
 #define Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6	(0xB << 7)
 #define Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6		(0x6 << 7)
 #define Q8_TX_CMD_OP_XMT_TCP_LSO		(0x5 << 7)
 #define Q8_TX_CMD_OP_XMT_UDP_CHKSUM		(0x3 << 7)
 #define Q8_TX_CMD_OP_XMT_TCP_CHKSUM		(0x2 << 7)
 #define Q8_TX_CMD_OP_XMT_ETHER			(0x1 << 7)
 
 	uint8_t		n_bufs;		/* # of data segs in data buffer */
 	uint8_t		data_len_lo;	/* data length lower 8 bits */
 	uint16_t	data_len_hi;	/* data length upper 16 bits */
 
 	uint64_t	buf2_addr;	/* buffer 2 address */
 
 	uint16_t	rsrvd0;
 	uint16_t	mss;		/* MSS for this packet */
 	uint8_t		cntxtid;	/* Bits 7-4: ContextId; 3-0: reserved */
 
 #define Q8_TX_CMD_PORT_CNXTID(c_id) ((c_id & 0xF) << 4)
 
 	uint8_t		total_hdr_len;	/* MAC+IP+TCP Header Length for LSO */
 	uint16_t	rsrvd1;
 
 	uint64_t	buf3_addr;	/* buffer 3 address */
 	uint64_t	buf1_addr;	/* buffer 1 address */
 
 	uint16_t	buf1_len;	/* length of buffer 1 */
 	uint16_t	buf2_len;	/* length of buffer 2 */
 	uint16_t	buf3_len;	/* length of buffer 3 */
 	uint16_t	buf4_len;	/* length of buffer 4 */
 
 	uint64_t	buf4_addr;	/* buffer 4 address */
 
 	uint32_t	rsrvd2;
 	uint16_t	rsrvd3;
 	uint16_t	vlan_tci;	/* VLAN TCI when hw tagging is enabled*/
 
 } __packed q80_tx_cmd_t; /* 64 bytes */
 
 #define Q8_TX_CMD_MAX_SEGMENTS		4
 #define Q8_TX_CMD_TSO_ALIGN		2
 #define Q8_TX_MAX_NON_TSO_SEGS		62
 
 
 /*
  * Receive Related Definitions
  */
 #define MAX_RDS_RING_SETS	8 /* Max# of Receive Descriptor Rings */
 
 #ifdef QL_ENABLE_ISCSI_TLV
 #define MAX_SDS_RINGS           32 /* Max# of Status Descriptor Rings */
 #define NUM_TX_RINGS		(MAX_SDS_RINGS * 2)
 #else
 #define MAX_SDS_RINGS           4 /* Max# of Status Descriptor Rings */
 #define NUM_TX_RINGS		MAX_SDS_RINGS
 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
 #define MAX_RDS_RINGS           MAX_SDS_RINGS /* Max# of Rcv Descriptor Rings */
 
 
 typedef struct _q80_rq_sds_ring {
 	uint64_t paddr; /* physical addr of status ring in system memory */
 	uint64_t hdr_split1;
 	uint64_t hdr_split2;
 	uint16_t size; /* number of entries in status ring */
 	uint16_t hdr_split1_size;
 	uint16_t hdr_split2_size;
 	uint16_t hdr_split_count;
 	uint16_t intr_id;
 	uint8_t  intr_src_bit;
 	uint8_t  rsrvd[5];
 } __packed q80_rq_sds_ring_t; /* 10 32bit words */
 
 typedef struct _q80_rq_rds_ring {
 	uint64_t paddr_std;	/* physical addr of rcv ring in system memory */
 	uint64_t paddr_jumbo;	/* physical addr of rcv ring in system memory */
 	uint16_t std_bsize;
 	uint16_t std_nentries;
 	uint16_t jumbo_bsize;
 	uint16_t jumbo_nentries;
 } __packed q80_rq_rds_ring_t; /* 6 32bit words */
 
 #define MAX_RCNTXT_SDS_RINGS	8
 
 typedef struct _q80_rq_rcv_cntxt {
 	uint16_t		opcode;
 	uint16_t 		count_version;
 	uint32_t		cap0;
 #define Q8_RCV_CNTXT_CAP0_BASEFW	(1 << 0)
 #define Q8_RCV_CNTXT_CAP0_MULTI_RDS	(1 << 1)
 #define Q8_RCV_CNTXT_CAP0_LRO		(1 << 5)
 #define Q8_RCV_CNTXT_CAP0_HW_LRO	(1 << 10)
 #define Q8_RCV_CNTXT_CAP0_VLAN_ALIGN	(1 << 14)
 #define Q8_RCV_CNTXT_CAP0_RSS		(1 << 15)
 #define Q8_RCV_CNTXT_CAP0_MSFT_RSS	(1 << 16)
 #define Q8_RCV_CNTXT_CAP0_SGL_JUMBO	(1 << 18)
 #define Q8_RCV_CNTXT_CAP0_SGL_LRO	(1 << 19)
 #define Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO	(1 << 26)
 
 	uint32_t		cap1;
 	uint32_t		cap2;
 	uint32_t		cap3;
 	uint8_t 		nrds_sets_rings;
 	uint8_t 		nsds_rings;
 	uint16_t		rds_producer_mode;
 #define Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE	0
 #define Q8_RCV_CNTXT_RDS_PROD_MODE_SHARED	1
 
 	uint16_t		rcv_vpid;
 	uint16_t		rsrvd0;
 	uint32_t		rsrvd1;
 	q80_rq_sds_ring_t	sds[MAX_RCNTXT_SDS_RINGS];
 	q80_rq_rds_ring_t	rds[MAX_RDS_RING_SETS];
 } __packed q80_rq_rcv_cntxt_t;
 
 typedef struct _q80_rsp_rds_ring {
 	uint32_t prod_std;
 	uint32_t prod_jumbo;
 } __packed q80_rsp_rds_ring_t; /* 8 bytes */
 
 typedef struct _q80_rsp_rcv_cntxt {
 	uint16_t		opcode;
 	uint16_t		regcnt_status;
 	uint8_t 		nrds_sets_rings;
 	uint8_t 		nsds_rings;
 	uint16_t		cntxt_id;
 	uint8_t			state;
 	uint8_t			num_funcs;
 	uint8_t			phy_port;
 	uint8_t			virt_port;
 	uint32_t		sds_cons[MAX_RCNTXT_SDS_RINGS];
 	q80_rsp_rds_ring_t	rds[MAX_RDS_RING_SETS];		
 } __packed q80_rsp_rcv_cntxt_t;
 
 typedef struct _q80_rcv_cntxt_destroy {
 	uint16_t	opcode;
 	uint16_t 	count_version;
 	uint32_t	cntxt_id;
 } __packed q80_rcv_cntxt_destroy_t;
 
 typedef struct _q80_rcv_cntxt_destroy_rsp {
 	uint16_t	opcode;
 	uint16_t	regcnt_status;
 } __packed q80_rcv_cntxt_destroy_rsp_t;
 
 
 /*
  * Add Receive Rings
  */
 typedef struct _q80_rq_add_rcv_rings {
 	uint16_t		opcode;
 	uint16_t		count_version;
 	uint8_t			nrds_sets_rings;
 	uint8_t			nsds_rings;
 	uint16_t		cntxt_id;
 	q80_rq_sds_ring_t	sds[MAX_RCNTXT_SDS_RINGS];
 	q80_rq_rds_ring_t	rds[MAX_RDS_RING_SETS];
 } __packed q80_rq_add_rcv_rings_t;
 
 typedef struct _q80_rsp_add_rcv_rings {
 	uint16_t		opcode;
 	uint16_t		regcnt_status;
 	uint8_t			nrds_sets_rings;
 	uint8_t			nsds_rings;
 	uint16_t		cntxt_id;
 	uint32_t		sds_cons[MAX_RCNTXT_SDS_RINGS];
 	q80_rsp_rds_ring_t	rds[MAX_RDS_RING_SETS];		
 } __packed q80_rsp_add_rcv_rings_t;
 
 /*
  * Map Status Ring to Receive Descriptor Set
  */
 
 #define MAX_SDS_TO_RDS_MAP      16
 
 typedef struct _q80_sds_rds_map_e {
         uint8_t sds_ring;
         uint8_t rsrvd0;
         uint8_t rds_ring;
         uint8_t rsrvd1;
 } __packed q80_sds_rds_map_e_t;
 
 typedef struct _q80_rq_map_sds_to_rds {
         uint16_t                opcode;
         uint16_t                count_version;
         uint16_t                cntxt_id;
         uint16_t                num_rings;
         q80_sds_rds_map_e_t     sds_rds[MAX_SDS_TO_RDS_MAP];
 } __packed q80_rq_map_sds_to_rds_t;
 
 
 typedef struct _q80_rsp_map_sds_to_rds {
         uint16_t                opcode;
         uint16_t                regcnt_status;
         uint16_t                cntxt_id;
         uint16_t                num_rings;
         q80_sds_rds_map_e_t     sds_rds[MAX_SDS_TO_RDS_MAP];
 } __packed q80_rsp_map_sds_to_rds_t;
 
 
 /*
  * Receive Descriptor corresponding to each entry in the receive ring
  */
 typedef struct _q80_rcv_desc {
 	uint16_t handle;
 	uint16_t rsrvd;
 	uint32_t buf_size; /* buffer size in bytes */
 	uint64_t buf_addr; /* physical address of buffer */
 } __packed q80_recv_desc_t;
 
 /*
  * Status Descriptor corresponding to each entry in the Status ring
  */
 typedef struct _q80_stat_desc {
 	uint64_t data[2];
 } __packed q80_stat_desc_t;
 
 /*
  * definitions for data[0] field of Status Descriptor
  */
 #define Q8_STAT_DESC_RSS_HASH(data)		(data & 0xFFFFFFFF)
 #define Q8_STAT_DESC_TOTAL_LENGTH(data)		((data >> 32) & 0x3FFF)
 #define Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(data)	((data >> 32) & 0xFFFF)
 #define Q8_STAT_DESC_HANDLE(data)		((data >> 48) & 0xFFFF)
 /*
  * definitions for data[1] field of Status Descriptor
  */
 
 #define Q8_STAT_DESC_OPCODE(data)		((data >> 42) & 0xF)
 #define		Q8_STAT_DESC_OPCODE_RCV_PKT		0x01
 #define		Q8_STAT_DESC_OPCODE_LRO_PKT		0x02
 #define		Q8_STAT_DESC_OPCODE_SGL_LRO		0x04
 #define		Q8_STAT_DESC_OPCODE_SGL_RCV		0x05
 #define		Q8_STAT_DESC_OPCODE_CONT		0x06
 
 /*
  * definitions for data[1] field of Status Descriptor for standard frames
  * status descriptor opcode equals 0x04
  */
 #define Q8_STAT_DESC_STATUS(data)		((data >> 39) & 0x0007)
 #define		Q8_STAT_DESC_STATUS_CHKSUM_NOT_DONE	0x00
 #define		Q8_STAT_DESC_STATUS_NO_CHKSUM		0x01
 #define		Q8_STAT_DESC_STATUS_CHKSUM_OK		0x02
 #define		Q8_STAT_DESC_STATUS_CHKSUM_ERR		0x03
 
 #define Q8_STAT_DESC_VLAN(data)			((data >> 47) & 1)
 #define Q8_STAT_DESC_VLAN_ID(data)		((data >> 48) & 0xFFFF)
 
 #define Q8_STAT_DESC_PROTOCOL(data)		((data >> 44) & 0x000F)
 #define Q8_STAT_DESC_L2_OFFSET(data)		((data >> 48) & 0x001F)
 #define Q8_STAT_DESC_COUNT(data)		((data >> 37) & 0x0007)
 
 /*
  * definitions for data[0-1] fields of Status Descriptor for LRO
  * status descriptor opcode equals 0x04
  */
 
 /* definitions for data[1] field */
 #define Q8_LRO_STAT_DESC_SEQ_NUM(data)		(uint32_t)(data)
 
 /*
  * definitions specific to opcode 0x04 data[1]
  */
 #define	Q8_STAT_DESC_COUNT_SGL_LRO(data)	((data >> 13) & 0x0007)
 #define Q8_SGL_LRO_STAT_L2_OFFSET(data)         ((data >> 16) & 0xFF)
 #define Q8_SGL_LRO_STAT_L4_OFFSET(data)         ((data >> 24) & 0xFF)
 #define Q8_SGL_LRO_STAT_TS(data)                ((data >> 40) & 0x1)
 #define Q8_SGL_LRO_STAT_PUSH_BIT(data)          ((data >> 41) & 0x1)
 
 
 /*
  * definitions specific to opcode 0x05 data[1]
  */
 #define	Q8_STAT_DESC_COUNT_SGL_RCV(data)	((data >> 37) & 0x0003)
 
 /*
  * definitions for opcode 0x06
  */
 /* definitions for data[0] field */
 #define Q8_SGL_STAT_DESC_HANDLE1(data)          (data & 0xFFFF)
 #define Q8_SGL_STAT_DESC_HANDLE2(data)          ((data >> 16) & 0xFFFF)
 #define Q8_SGL_STAT_DESC_HANDLE3(data)          ((data >> 32) & 0xFFFF)
 #define Q8_SGL_STAT_DESC_HANDLE4(data)          ((data >> 48) & 0xFFFF)
 
 /* definitions for data[1] field */
 #define Q8_SGL_STAT_DESC_HANDLE5(data)          (data & 0xFFFF)
 #define Q8_SGL_STAT_DESC_HANDLE6(data)          ((data >> 16) & 0xFFFF)
 #define Q8_SGL_STAT_DESC_NUM_HANDLES(data)      ((data >> 32) & 0x7)
 #define Q8_SGL_STAT_DESC_HANDLE7(data)          ((data >> 48) & 0xFFFF)
 
 /** Driver Related Definitions Begin **/
 
 #define TX_SMALL_PKT_SIZE	128 /* size in bytes of small packets */
 
 /* The number of descriptors should be a power of 2 */
 #define NUM_TX_DESCRIPTORS		1024
 #define NUM_STATUS_DESCRIPTORS		1024
 
 
 #define NUM_RX_DESCRIPTORS	2048
 
 /*
  * structure describing various dma buffers
  */
 
 typedef struct qla_dmabuf {
         /* one valid-bit per DMA area below */
         volatile struct {
                 uint32_t        tx_ring		:1,
                                 rds_ring	:1,
                                 sds_ring	:1,
 				minidump	:1;
         } flags;
 
         qla_dma_t               tx_ring;
         qla_dma_t               rds_ring[MAX_RDS_RINGS];
         qla_dma_t               sds_ring[MAX_SDS_RINGS];
 	qla_dma_t		minidump;
 } qla_dmabuf_t;
 
 /* host-side state for one status (SDS) ring */
 typedef struct _qla_sds {
         q80_stat_desc_t *sds_ring_base; /* start of sds ring */
         uint32_t        sdsr_next; /* next entry in SDS ring to process */
         struct lro_ctrl lro;
         void            *rxb_free;
         uint32_t        rx_free;
         volatile uint32_t rcv_active;
 	uint32_t	sds_consumer;
 	uint64_t	intr_count;
 	uint64_t	spurious_intr_count;
 } qla_sds_t;
 
 #define Q8_MAX_LRO_CONT_DESC    7
 #define Q8_MAX_HANDLES_LRO      (1 + (Q8_MAX_LRO_CONT_DESC * 7))
 #define Q8_MAX_HANDLES_NON_LRO  8
 
 /* parsed completion info for a scatter-gather receive */
 typedef struct _qla_sgl_rcv {
         uint16_t        pkt_length;
         uint16_t        num_handles;
         uint16_t        chksum_status;
         uint32_t        rss_hash;
         uint16_t        rss_hash_flags;
         uint16_t        vlan_tag;
         uint16_t        handle[Q8_MAX_HANDLES_NON_LRO];
 } qla_sgl_rcv_t;
 
 /* parsed completion info for a scatter-gather LRO receive */
 typedef struct _qla_sgl_lro {
         uint16_t        flags;
 #define Q8_LRO_COMP_TS          0x1
 #define Q8_LRO_COMP_PUSH_BIT    0x2
         uint16_t        l2_offset;
         uint16_t        l4_offset;
 
         uint16_t        payload_length;
         uint16_t        num_handles;
         uint32_t        rss_hash;
         uint16_t        rss_hash_flags;
         uint16_t        vlan_tag;
         uint16_t        handle[Q8_MAX_HANDLES_LRO];
 } qla_sgl_lro_t;
 
 typedef union {
         qla_sgl_rcv_t   rcv;
         qla_sgl_lro_t   lro;
 } qla_sgl_comp_t;
 
 #define QL_FRAME_HDR_SIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +\
 		sizeof (struct ip6_hdr) + sizeof (struct tcphdr) + 16)
 
 /* host-side state for one transmit ring/context */
 typedef struct _qla_hw_tx_cntxt {
 	q80_tx_cmd_t    *tx_ring_base;
 	bus_addr_t	tx_ring_paddr;
 
 	volatile uint32_t *tx_cons; /* tx consumer shadow reg */
 	bus_addr_t      tx_cons_paddr;
 
 	volatile uint32_t txr_free; /* # of free entries in tx ring */
 	volatile uint32_t txr_next; /* # next available tx ring entry */
 	volatile uint32_t txr_comp; /* index of last tx entry completed */
 
 	uint32_t        tx_prod_reg;
 	uint16_t	tx_cntxt_id;
 
 } qla_hw_tx_cntxt_t;
 
 typedef struct _qla_mcast {
 	uint16_t	rsrvd;
 	uint8_t		addr[ETHER_ADDR_LEN];
 } __packed qla_mcast_t; 
 
 /* host-side state for one receive descriptor ring */
 typedef struct _qla_rdesc {
         volatile uint32_t prod_std;
         volatile uint32_t prod_jumbo;
         volatile uint32_t rx_next; /* next standard rcv ring to arm fw */
         volatile int32_t  rx_in; /* next standard rcv ring to add mbufs */
 	volatile uint64_t count;
 } qla_rdesc_t;
 
 /*
  * Flash Descriptor Table (FDT) as read from flash: identifies the flash
  * part and the command opcodes / geometry needed to program it.
  */
 typedef struct _qla_flash_desc_table {
 	uint32_t	flash_valid;
 	uint16_t	flash_ver;
 	uint16_t	flash_len;
 	uint16_t	flash_cksum;
 	uint16_t	flash_unused;
 	uint8_t		flash_model[16];
 	uint16_t	flash_manuf;
 	uint16_t	flash_id;
 	uint8_t		flash_flag;
 	uint8_t		erase_cmd;
 	uint8_t		alt_erase_cmd;
 	uint8_t		write_enable_cmd;
 	uint8_t		write_enable_bits;
 	uint8_t		write_statusreg_cmd;
 	uint8_t		unprotected_sec_cmd;
 	uint8_t		read_manuf_cmd;
 	uint32_t	block_size;
 	uint32_t	alt_block_size;
 	uint32_t	flash_size;
 	uint32_t	write_enable_data;
 	uint8_t		readid_addr_len;
 	uint8_t		write_disable_bits;
 	uint8_t		read_dev_id_len;
 	uint8_t		chip_erase_cmd;
 	uint16_t	read_timeo;
 	uint8_t		protected_sec_cmd;
 	uint8_t		resvd[65];
 } __packed qla_flash_desc_table_t;
 
 /*
  * struct for storing hardware specific information for a given interface
  */
 typedef struct _qla_hw {
 	struct {
 		uint32_t
 			unicast_mac	:1,
 			bcast_mac	:1,
 			loopback_mode	:2,
 			init_tx_cnxt	:1,
 			init_rx_cnxt	:1,
 			init_intr_cnxt	:1,
 			fduplex		:1,
 			autoneg		:1,
 			fdt_valid	:1;
 	} flags;
 
 
 	/* link parameters reported via link-status/AEN */
 	uint16_t	link_speed;
 	uint16_t	cable_length;
 	uint32_t	cable_oui;
 	uint8_t		link_up;
 	uint8_t		module_type;
 	uint8_t		link_faults;
 
 	uint8_t		mac_rcv_mode;
 
 	uint32_t	max_mtu;
 
 	uint8_t		mac_addr[ETHER_ADDR_LEN];
 
 	uint32_t	num_sds_rings;
 	uint32_t	num_rds_rings;
 	uint32_t	num_tx_rings;
 
         qla_dmabuf_t	dma_buf;
 	
 	/* Transmit Side */
 
 	qla_hw_tx_cntxt_t tx_cntxt[NUM_TX_RINGS];
 
 	/* Receive Side */
 
 	uint16_t	rcv_cntxt_id;
 
 	uint32_t	mbx_intr_mask_offset;
 
 	uint16_t	intr_id[MAX_SDS_RINGS];
 	uint32_t	intr_src[MAX_SDS_RINGS];
 
 	qla_sds_t	sds[MAX_SDS_RINGS]; 
 	uint32_t	mbox[Q8_NUM_MBOX];
 	qla_rdesc_t	rds[MAX_RDS_RINGS];		
 
 	uint32_t	rds_pidx_thres;
 	uint32_t	sds_cidx_thres;
 
 	uint32_t	rcv_intr_coalesce;
 	uint32_t	xmt_intr_coalesce;
 
 	/* Immediate Completion */
 	volatile uint32_t imd_compl;
 	volatile uint32_t aen_mb0;
 	volatile uint32_t aen_mb1;
 	volatile uint32_t aen_mb2;
 	volatile uint32_t aen_mb3;
 	volatile uint32_t aen_mb4;
 
 	/* multicast address list */
 	uint32_t	nmcast;
 	qla_mcast_t	mcast[Q8_MAX_NUM_MULTICAST_ADDRS];
 	uint8_t		mac_addr_arr[(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)];
 
 	/* reset sequence */
 #define Q8_MAX_RESET_SEQ_IDX	16
 	uint32_t	rst_seq[Q8_MAX_RESET_SEQ_IDX];
 	uint32_t	rst_seq_idx;
 
 	/* heart beat register value */
 	uint32_t	hbeat_value;
 	uint32_t	health_count;
+	uint32_t	hbeat_failure;
 
 	uint32_t	max_tx_segs;
 	uint32_t	min_lro_pkt_size;
 	
 	uint32_t        enable_hw_lro;
 	uint32_t        enable_soft_lro;
 	uint32_t        enable_9kb;
 
 	uint32_t	user_pri_nic;
 	uint32_t	user_pri_iscsi;
 	uint64_t	iscsi_pkt_count;
 
 	/* Flash Descriptor Table */
 	qla_flash_desc_table_t fdt;
 
 	/* Minidump Related */
 	uint32_t	mdump_init;
 	uint32_t	mdump_done;
 	uint32_t	mdump_active;
 	uint32_t	mdump_capture_mask;
 	uint32_t	mdump_start_seq_index;
 	void		*mdump_buffer;
 	uint32_t	mdump_buffer_size;
 	void		*mdump_template;
 	uint32_t	mdump_template_size;
 } qla_hw_t;
 
 /*
  * Register-access helper macros.
  * NOTE(review): three of these expand with a trailing ';' baked into
  * the macro body. Invoking them as `MACRO(...);` yields an extra empty
  * statement, which breaks un-braced if/else bodies -- consider the
  * do { } while (0) idiom (behavior change for existing call sites, so
  * audit callers first).
  */
 #define QL_UPDATE_RDS_PRODUCER_INDEX(ha, prod_reg, val) \
 		bus_write_4((ha->pci_reg), prod_reg, val);
 
 #define QL_UPDATE_TX_PRODUCER_INDEX(ha, val, i) \
 		WRITE_REG32(ha, ha->hw.tx_cntxt[i].tx_prod_reg, val)
 
 #define QL_UPDATE_SDS_CONSUMER_INDEX(ha, i, val) \
 	bus_write_4((ha->pci_reg), (ha->hw.sds[i].sds_consumer), val);
 
 #define QL_ENABLE_INTERRUPTS(ha, i) \
 		bus_write_4((ha->pci_reg), (ha->hw.intr_src[i]), 0);
 
 #define QL_BUFFER_ALIGN                16
 
 
 /*
  * Flash Configuration 
  */
 #define Q8_BOARD_CONFIG_OFFSET		0x370000
 #define Q8_BOARD_CONFIG_LENGTH		0x2000
 
 #define Q8_BOARD_CONFIG_MAC0_LO		0x400
 
 #define Q8_FDT_LOCK_MAGIC_ID		0x00FD00FD
 #define Q8_FDT_FLASH_ADDR_VAL		0xFD009F
 #define Q8_FDT_FLASH_CTRL_VAL		0x3F
 #define Q8_FDT_MASK_VAL			0xFF
 
 #define Q8_WR_ENABLE_FL_ADDR		0xFD0100
 #define Q8_WR_ENABLE_FL_CTRL		0x5
 
 #define Q8_ERASE_LOCK_MAGIC_ID		0x00EF00EF
 #define Q8_ERASE_FL_ADDR_MASK		0xFD0300
 #define Q8_ERASE_FL_CTRL_MASK		0x3D
 
 #define Q8_WR_FL_LOCK_MAGIC_ID		0xABCDABCD
 #define Q8_WR_FL_ADDR_MASK		0x800000
 #define Q8_WR_FL_CTRL_MASK		0x3D
 
 #define QL_FDT_OFFSET			0x3F0000
 #define Q8_FLASH_SECTOR_SIZE		0x10000
 
 /*
  * Off Chip Memory Access
  */
 
 typedef struct _q80_offchip_mem_val {
         uint32_t data_lo;
         uint32_t data_hi;
         uint32_t data_ulo;
         uint32_t data_uhi;
 } q80_offchip_mem_val_t;
 
 #endif /* #ifndef _QL_HW_H_ */
Index: head/sys/dev/qlxgbe/ql_os.c
===================================================================
--- head/sys/dev/qlxgbe/ql_os.c	(revision 321232)
+++ head/sys/dev/qlxgbe/ql_os.c	(revision 321233)
@@ -1,2042 +1,2042 @@
 /*
  * Copyright (c) 2013-2016 Qlogic Corporation
  * All rights reserved.
  *
  *  Redistribution and use in source and binary forms, with or without
  *  modification, are permitted provided that the following conditions
  *  are met:
  *
  *  1. Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
  *  2. Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
  *
  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  *  POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * File: ql_os.c
  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 
 #include "ql_os.h"
 #include "ql_hw.h"
 #include "ql_def.h"
 #include "ql_inline.h"
 #include "ql_ver.h"
 #include "ql_glbl.h"
 #include "ql_dbg.h"
 #include <sys/smp.h>
 
 /*
  * Some PCI Configuration Space Related Defines
  */
 
 #ifndef PCI_VENDOR_QLOGIC
 #define PCI_VENDOR_QLOGIC	0x1077
 #endif
 
 #ifndef PCI_PRODUCT_QLOGIC_ISP8030
 #define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
 #endif
 
 #define PCI_QLOGIC_ISP8030 \
 	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
 
 /*
  * static functions
  */
 static int qla_alloc_parent_dma_tag(qla_host_t *ha);
 static void qla_free_parent_dma_tag(qla_host_t *ha);
 static int qla_alloc_xmt_bufs(qla_host_t *ha);
 static void qla_free_xmt_bufs(qla_host_t *ha);
 static int qla_alloc_rcv_bufs(qla_host_t *ha);
 static void qla_free_rcv_bufs(qla_host_t *ha);
 static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);
 
 static void qla_init_ifnet(device_t dev, qla_host_t *ha);
 static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
 static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
 static void qla_release(qla_host_t *ha);
 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
 		int error);
 static void qla_stop(qla_host_t *ha);
 static void qla_get_peer(qla_host_t *ha);
 static void qla_error_recovery(void *context, int pending);
 static void qla_async_event(void *context, int pending);
 static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
 		uint32_t iscsi_pdu);
 
 /*
  * Hooks to the Operating Systems
  */
 static int qla_pci_probe (device_t);
 static int qla_pci_attach (device_t);
 static int qla_pci_detach (device_t);
 
 static void qla_init(void *arg);
 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
 static int qla_media_change(struct ifnet *ifp);
 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
 
 static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
 static void qla_qflush(struct ifnet *ifp);
 static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
 static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
 static int qla_create_fp_taskqueues(qla_host_t *ha);
 static void qla_destroy_fp_taskqueues(qla_host_t *ha);
 static void qla_drain_fp_taskqueues(qla_host_t *ha);
 
 /* newbus device interface methods for the qla83xx PCI driver */
 static device_method_t qla_pci_methods[] = {
 	/* Device interface */
 	DEVMETHOD(device_probe, qla_pci_probe),
 	DEVMETHOD(device_attach, qla_pci_attach),
 	DEVMETHOD(device_detach, qla_pci_detach),
 	{ 0, 0 }
 };
 
 /* driver declaration: the per-device softc is a qla_host_t */
 static driver_t qla_pci_driver = {
 	"ql", qla_pci_methods, sizeof (qla_host_t),
 };
 
 static devclass_t qla83xx_devclass;
 
 DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);
 
 MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
 MODULE_DEPEND(qla83xx, ether, 1, 1, 1);
 
 MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");
 
 /* rx-ring replenish thresholds for standard and jumbo MTU */
 #define QL_STD_REPLENISH_THRES		0
 #define QL_JUMBO_REPLENISH_THRES	32
 
 
 /* device/version description strings, filled in by qla_pci_probe() */
 static char dev_str[64];
 static char ver_str[64];
 
 /*
  * Name:	qla_pci_probe
  * Function:	Validate the PCI device to be a QLA80XX device.
  *		Returns BUS_PROBE_DEFAULT and sets the device description
  *		on a match, ENXIO otherwise.
  */
 static int
 qla_pci_probe(device_t dev)
 {
         uint32_t chip_id;
 
         /* compose {device-id, vendor-id} for comparison */
         chip_id = (pci_get_device(dev) << 16) | (pci_get_vendor(dev));
 
         if (chip_id != PCI_QLOGIC_ISP8030)
                 return (ENXIO);
 
         snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
                 "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
                 QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
                 QLA_VERSION_BUILD);
         snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
                 QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
                 QLA_VERSION_BUILD);
         device_set_desc(dev, dev_str);
 
         if (bootverbose)
                 printf("%s: %s\n ", __func__, dev_str);
 
         return (BUS_PROBE_DEFAULT);
 }
 
 /*
  * Register the driver's per-device sysctl nodes (version strings,
  * statistics/link-status triggers, debug level, replenish threshold
  * and LRO/TSO/VLAN counters).  Also seeds the defaults for
  * ha->dbg_level and ha->std_replenish.
  */
 static void
 qla_add_sysctls(qla_host_t *ha)
 {
         device_t dev = ha->pci_dev;
 
 	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 		OID_AUTO, "version", CTLFLAG_RD,
 		ver_str, 0, "Driver Version");
 
 	/* writing 1 to "stats" dumps hw statistics (see qla_sysctl_get_stats) */
         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                 (void *)ha, 0,
                 qla_sysctl_get_stats, "I", "Statistics");
 
         SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "fw_version", CTLFLAG_RD,
                 ha->fw_ver_str, 0, "firmware version");
 
 	/* writing 1 to "link_status" refreshes the link state */
         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                 (void *)ha, 0,
                 qla_sysctl_get_link_status, "I", "Link Status");
 
 	ha->dbg_level = 0;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "debug", CTLFLAG_RW,
                 &ha->dbg_level, ha->dbg_level, "Debug Level");
 
 	ha->std_replenish = QL_STD_REPLENISH_THRES;
         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "std_replenish", CTLFLAG_RW,
                 &ha->std_replenish, ha->std_replenish,
                 "Threshold for Replenishing Standard Frames");
 
         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "ipv4_lro",
                 CTLFLAG_RD, &ha->ipv4_lro,
                 "number of ipv4 lro completions");
 
         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                 OID_AUTO, "ipv6_lro",
                 CTLFLAG_RD, &ha->ipv6_lro,
                 "number of ipv6 lro completions");
 
 	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 		OID_AUTO, "tx_tso_frames",
 		CTLFLAG_RD, &ha->tx_tso_frames,
 		"number of Tx TSO Frames");
 
 	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 		OID_AUTO, "hw_vlan_tx_frames",
 		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
 		"number of Tx VLAN Frames");
 
         return;
 }
 
 static void
 qla_watchdog(void *arg)
 {
 	qla_host_t *ha = arg;
 	qla_hw_t *hw;
 	struct ifnet *ifp;
 	uint32_t i;
 
 	hw = &ha->hw;
 	ifp = ha->ifp;
 
         if (ha->flags.qla_watchdog_exit) {
 		ha->qla_watchdog_exited = 1;
 		return;
 	}
 	ha->qla_watchdog_exited = 0;
 
 	if (!ha->flags.qla_watchdog_pause) {
 		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
 			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {
 			ha->qla_watchdog_paused = 1;
 			ha->flags.qla_watchdog_pause = 1;
 			ha->qla_initiate_recovery = 0;
 			ha->err_inject = 0;
 			device_printf(ha->pci_dev,
 				"%s: taskqueue_enqueue(err_task) \n", __func__);
 			taskqueue_enqueue(ha->err_tq, &ha->err_task);
 		} else if (ha->flags.qla_interface_up) {
 
                         if (ha->async_event) {
                                 ha->async_event = 0;
                                 taskqueue_enqueue(ha->async_event_tq,
                                         &ha->async_event_task);
                         }
 
 			for (i = 0; i < ha->hw.num_sds_rings; i++) {
 				qla_tx_fp_t *fp = &ha->tx_fp[i];
 
 				if (fp->fp_taskqueue != NULL)
 					taskqueue_enqueue(fp->fp_taskqueue,
 						&fp->fp_task);
 			}
 
 			ha->qla_watchdog_paused = 0;
 		} else {
 			ha->qla_watchdog_paused = 0;
 		}
 	} else {
 		ha->qla_watchdog_paused = 1;
 	}
 
-	ha->watchdog_ticks = ha->watchdog_ticks++ % 1000;
+	ha->watchdog_ticks = ha->watchdog_ticks++ % 500;
 	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
 		qla_watchdog, ha);
 }
 
 /*
  * Name:	qla_pci_attach
  * Function:	attaches the device to the operating system.
  *		Maps BARs, initializes firmware/hardware state, allocates
  *		MSI-X vectors and interrupt handlers, DMA resources, the
  *		network interface, the watchdog callout, the ioctl cdev and
  *		the error-recovery/async-event taskqueues.  On any failure
  *		everything allocated so far is undone via qla_release().
  */
 static int
 qla_pci_attach(device_t dev)
 {
 	qla_host_t *ha = NULL;
 	uint32_t rsrc_len;
 	int i;
 	uint32_t num_rcvq = 0;
 
         if ((ha = device_get_softc(dev)) == NULL) {
                 device_printf(dev, "cannot get softc\n");
                 return (ENOMEM);
         }
 
         memset(ha, 0, sizeof (qla_host_t));
 
         if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                 device_printf(dev, "device is not ISP8030\n");
                 return (ENXIO);
 	}
 
         ha->pci_func = pci_get_function(dev) & 0x1;
 
         ha->pci_dev = dev;
 
 	pci_enable_busmaster(dev);
 
 	/* map BAR 0 (registers) */
 	ha->reg_rid = PCIR_BAR(0);
 	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
 				RF_ACTIVE);
 
         if (ha->pci_reg == NULL) {
                 device_printf(dev, "unable to map any ports\n");
                 goto qla_pci_attach_err;
         }
 
 	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
 					ha->reg_rid);
 
 	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
 
 	qla_add_sysctls(ha);
 	ql_hw_add_sysctls(ha);
 
 	ha->flags.lock_init = 1;
 
 	/* map BAR 2 (secondary register window) */
 	ha->reg_rid1 = PCIR_BAR(2);
 	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
 			&ha->reg_rid1, RF_ACTIVE);
 
 	ha->msix_count = pci_msix_count(dev);
 
 	/*
 	 * One vector for the mailbox plus one per SDS ring.
 	 * NOTE(review): hw.num_sds_rings was zeroed by the memset above and
 	 * does not appear to be set before this check -- verify where it is
 	 * initialized (possibly inside ql_hw_add_sysctls()).
 	 */
 	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
 		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
 			ha->msix_count);
 		goto qla_pci_attach_err;
 	}
 
 	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
 		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
 		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
 		ha->pci_reg1));
 
         /* initialize hardware */
         if (ql_init_hw(ha)) {
                 device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                 goto qla_pci_attach_err;
         }
 
         device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                 ha->fw_ver_build);
         snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                         ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                         ha->fw_ver_build);
 
         if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                 device_printf(dev, "%s: qla_get_nic_partition failed\n",
                         __func__);
                 goto qla_pci_attach_err;
         }
         device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                 " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
 		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
 		ha->pci_reg, ha->pci_reg1, num_rcvq);
 
 
 #ifdef QL_ENABLE_ISCSI_TLV
 	/* reserve tx rings for iSCSI TLV when vectors/queues are limited */
         if ((ha->msix_count  < 64) || (num_rcvq != 32)) {
                 ha->hw.num_sds_rings = 15;
                 ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
         }
 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
 	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
 
 	ha->msix_count = ha->hw.num_sds_rings + 1;
 
 	if (pci_alloc_msix(dev, &ha->msix_count)) {
 		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
 			ha->msix_count);
 		ha->msix_count = 0;
 		goto qla_pci_attach_err;
 	}
 
 	/* vector 1: mailbox interrupt */
 	ha->mbx_irq_rid = 1;
 	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
 				&ha->mbx_irq_rid,
 				(RF_ACTIVE | RF_SHAREABLE));
 	if (ha->mbx_irq == NULL) {
 		device_printf(dev, "could not allocate mbx interrupt\n");
 		goto qla_pci_attach_err;
 	}
 	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
 		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
 		device_printf(dev, "could not setup mbx interrupt\n");
 		goto qla_pci_attach_err;
 	}
 
 	/* vectors 2..N+1: one per SDS ring, plus the tx fastpath buf_rings */
 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
 		ha->irq_vec[i].sds_idx = i;
                 ha->irq_vec[i].ha = ha;
                 ha->irq_vec[i].irq_rid = 2 + i;
 
 		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
 				&ha->irq_vec[i].irq_rid,
 				(RF_ACTIVE | RF_SHAREABLE));
 
 		if (ha->irq_vec[i].irq == NULL) {
 			device_printf(dev, "could not allocate interrupt\n");
 			goto qla_pci_attach_err;
 		}
 		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
 			(INTR_TYPE_NET | INTR_MPSAFE),
 			NULL, ql_isr, &ha->irq_vec[i],
 			&ha->irq_vec[i].handle)) {
 			device_printf(dev, "could not setup interrupt\n");
 			goto qla_pci_attach_err;
 		}
 
 		ha->tx_fp[i].ha = ha;
 		ha->tx_fp[i].txr_idx = i;
 
 		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
 			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
 				__func__, i);
 			goto qla_pci_attach_err;
 		}
 	}
 
 	if (qla_create_fp_taskqueues(ha) != 0)
 		goto qla_pci_attach_err;
 
 	printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
 		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
 
 	ql_read_mac_addr(ha);
 
 	/* allocate parent dma tag */
 	if (qla_alloc_parent_dma_tag(ha)) {
 		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
 			__func__);
 		goto qla_pci_attach_err;
 	}
 
 	/* alloc all dma buffers */
 	if (ql_alloc_dma(ha)) {
 		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
 		goto qla_pci_attach_err;
 	}
 	qla_get_peer(ha);
 
 	if (ql_minidump_init(ha) != 0) {
 		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
 		goto qla_pci_attach_err;
 	}
 	/* create the o.s ethernet interface */
 	qla_init_ifnet(dev, ha);
 
 	ha->flags.qla_watchdog_active = 1;
 	ha->flags.qla_watchdog_pause = 0;
 
 	callout_init(&ha->tx_callout, TRUE);
 	ha->flags.qla_callout_init = 1;
 
 	/* create ioctl device interface */
 	if (ql_make_cdev(ha)) {
 		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
 		goto qla_pci_attach_err;
 	}
 
 	/* arm the periodic watchdog */
 	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
 		qla_watchdog, ha);
 
 	/* single-threaded taskqueues for error recovery and async events */
 	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
 	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
 			taskqueue_thread_enqueue, &ha->err_tq);
 	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
 		device_get_nameunit(ha->pci_dev));
 
         TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
         ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
                         taskqueue_thread_enqueue, &ha->async_event_tq);
         taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                 device_get_nameunit(ha->pci_dev));
 
 	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
         return (0);
 
 qla_pci_attach_err:
 
 	qla_release(ha);
 
 	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
         return (ENXIO);
 }
 
 /*
  * Name:	qla_pci_detach
  * Function:	Unhooks the device from the operating system.
  *		Stops the interface under the driver lock, then releases
  *		all resources via qla_release().
  */
 static int
 qla_pci_detach(device_t dev)
 {
 	qla_host_t *ha = NULL;
 
         if ((ha = device_get_softc(dev)) == NULL) {
                 device_printf(dev, "cannot get softc\n");
                 return (ENOMEM);
         }
 
 	/*
 	 * The debug print originally ran before device_get_softc(), while
 	 * ha was still NULL; QL_DPRINT2 consults ha, so it is only safe
 	 * after the softc has been obtained.
 	 */
 	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
 
 	QLA_LOCK(ha);
 	qla_stop(ha);
 	QLA_UNLOCK(ha);
 
 	qla_release(ha);
 
 	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
 
         return (0);
 }
 
 /*
  * SYSCTL Related Callbacks
  */
 static int
 qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
 {
 	int err, ret = 0;
 	qla_host_t *ha;
 
 	err = sysctl_handle_int(oidp, &ret, 0, req);
 
 	if (err || !req->newptr)
 		return (err);
 
 	if (ret == 1) {
 		ha = (qla_host_t *)arg1;
 		ql_get_stats(ha);
 	}
 	return (err);
 }
 static int
 qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
 {
 	int err, ret = 0;
 	qla_host_t *ha;
 
 	err = sysctl_handle_int(oidp, &ret, 0, req);
 
 	if (err || !req->newptr)
 		return (err);
 
 	if (ret == 1) {
 		ha = (qla_host_t *)arg1;
 		ql_hw_link_status(ha);
 	}
 	return (err);
 }
 
 /*
  * Name:	qla_release
  * Function:	Releases the resources allocated for the device.
  *		Teardown order mirrors (reverse of) qla_pci_attach:
  *		taskqueues, cdev, watchdog, ifnet, DMA, interrupts,
  *		buf_rings, MSI-X, lock, BARs.  Safe to call with a
  *		partially-initialized softc (attach error path).
  */
 static void
 qla_release(qla_host_t *ha)
 {
 	device_t dev;
 	int i;
 
 	dev = ha->pci_dev;
 
         if (ha->async_event_tq) {
                 taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
                 taskqueue_free(ha->async_event_tq);
         }
 
 	if (ha->err_tq) {
 		taskqueue_drain(ha->err_tq, &ha->err_task);
 		taskqueue_free(ha->err_tq);
 	}
 
 	ql_del_cdev(ha);
 
 	/* ask the watchdog to exit and wait for it to acknowledge */
 	if (ha->flags.qla_watchdog_active) {
 		ha->flags.qla_watchdog_exit = 1;
 
 		while (ha->qla_watchdog_exited == 0)
 			qla_mdelay(__func__, 1);
 	}
 
 	if (ha->flags.qla_callout_init)
 		callout_stop(&ha->tx_callout);
 
 	if (ha->ifp != NULL)
 		ether_ifdetach(ha->ifp);
 
 	ql_free_dma(ha); 
 	qla_free_parent_dma_tag(ha);
 
 	/* mailbox interrupt */
 	if (ha->mbx_handle)
 		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);
 
 	if (ha->mbx_irq)
 		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
 				ha->mbx_irq);
 
 	/* per-ring interrupts and tx buf_rings */
 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
 
 		if (ha->irq_vec[i].handle) {
 			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
 					ha->irq_vec[i].handle);
 		}
 			
 		if (ha->irq_vec[i].irq) {
 			(void)bus_release_resource(dev, SYS_RES_IRQ,
 				ha->irq_vec[i].irq_rid,
 				ha->irq_vec[i].irq);
 		}
 
 		qla_free_tx_br(ha, &ha->tx_fp[i]);
 	}
 	qla_destroy_fp_taskqueues(ha);
 
 	if (ha->msix_count)
 		pci_release_msi(dev);
 
 	if (ha->flags.lock_init) {
 		mtx_destroy(&ha->hw_lock);
 	}
 
         if (ha->pci_reg)
                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
 				ha->pci_reg);
 
         if (ha->pci_reg1)
                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
 				ha->pci_reg1);
 }
 
 /*
  * DMA Related Functions
  */
 
 /*
  * Busdma load callback: stores the (single) segment's bus address into
  * the caller-supplied bus_addr_t, or 0 on error.
  */
 static void
 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 {
 	bus_addr_t *paddr = (bus_addr_t *)arg;
 
 	*paddr = 0;
 
 	if (error != 0) {
 		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
 		return;
 	}
 
 	*paddr = segs[0].ds_addr;
 }
 
 /*
  * Allocate a DMA-able buffer described by dma_buf (caller fills in
  * .size and .alignment).  On success .dma_tag/.dma_map/.dma_b/.dma_addr
  * are valid and the caller owns them (release with ql_free_dmabuf()).
  * Returns 0 on success, non-zero on failure (nothing left allocated).
  */
 int
 ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
 {
         int             ret = 0;
         device_t        dev;
         bus_addr_t      b_addr;
 
         dev = ha->pci_dev;
 
         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
 
         ret = bus_dma_tag_create(
                         ha->parent_tag,/* parent */
                         dma_buf->alignment,
                         ((bus_size_t)(1ULL << 32)),/* boundary */
                         BUS_SPACE_MAXADDR,      /* lowaddr */
                         BUS_SPACE_MAXADDR,      /* highaddr */
                         NULL, NULL,             /* filter, filterarg */
                         dma_buf->size,          /* maxsize */
                         1,                      /* nsegments */
                         dma_buf->size,          /* maxsegsize */
                         0,                      /* flags */
                         NULL, NULL,             /* lockfunc, lockarg */
                         &dma_buf->dma_tag);
 
         if (ret) {
                 device_printf(dev, "%s: could not create dma tag\n", __func__);
                 goto ql_alloc_dmabuf_exit;
         }
         ret = bus_dmamem_alloc(dma_buf->dma_tag,
                         (void **)&dma_buf->dma_b,
                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                         &dma_buf->dma_map);
         if (ret) {
                 bus_dma_tag_destroy(dma_buf->dma_tag);
                 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                 goto ql_alloc_dmabuf_exit;
         }
 
         ret = bus_dmamap_load(dma_buf->dma_tag,
                         dma_buf->dma_map,
                         dma_buf->dma_b,
                         dma_buf->size,
                         qla_dmamap_callback,
                         &b_addr, BUS_DMA_NOWAIT);
 
         if (ret || !b_addr) {
                 /*
                  * Free the memory BEFORE destroying the tag: the original
                  * code destroyed the tag first and then passed the
                  * destroyed tag to bus_dmamem_free() (use-after-destroy).
                  */
                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                         dma_buf->dma_map);
                 bus_dma_tag_destroy(dma_buf->dma_tag);
                 ret = -1;
                 goto ql_alloc_dmabuf_exit;
         }
 
         dma_buf->dma_addr = b_addr;
 
 ql_alloc_dmabuf_exit:
         QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                 __func__, ret, (void *)dma_buf->dma_tag,
                 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
 		dma_buf->size));
 
         return ret;
 }
 
 /*
  * Release a buffer obtained from ql_alloc_dmabuf().  Order matters:
  * unload the map, free the memory, then destroy the tag.
  */
 void
 ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
 {
 	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 
         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
         bus_dma_tag_destroy(dma_buf->dma_tag);
 }
 
 /*
  * Create the parent busdma tag from which all per-buffer tags are
  * derived (see ql_alloc_dmabuf()).  Sets ha->flags.parent_tag on
  * success so qla_free_parent_dma_tag() knows to destroy it.
  * Returns 0 on success, -1 on failure.
  */
 static int
 qla_alloc_parent_dma_tag(qla_host_t *ha)
 {
 	int		ret;
 	device_t	dev;
 
 	dev = ha->pci_dev;
 
         /*
          * Allocate parent DMA Tag
          */
         ret = bus_dma_tag_create(
                         bus_get_dma_tag(dev),   /* parent */
                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                         BUS_SPACE_MAXADDR,      /* lowaddr */
                         BUS_SPACE_MAXADDR,      /* highaddr */
                         NULL, NULL,             /* filter, filterarg */
                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                         0,                      /* nsegments */
                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                         0,                      /* flags */
                         NULL, NULL,             /* lockfunc, lockarg */
                         &ha->parent_tag);
 
         if (ret) {
                 device_printf(dev, "%s: could not create parent dma tag\n",
                         __func__);
 		return (-1);
         }
 
         ha->flags.parent_tag = 1;
 	
 	return (0);
 }
 
 /*
  * Destroy the parent busdma tag if qla_alloc_parent_dma_tag() created
  * one; idempotent thanks to the flags.parent_tag guard.
  */
 static void
 qla_free_parent_dma_tag(qla_host_t *ha)
 {
 	if (!ha->flags.parent_tag)
 		return;
 
 	bus_dma_tag_destroy(ha->parent_tag);
 	ha->flags.parent_tag = 0;
 }
 
 /*
  * Name: qla_init_ifnet
  * Function: Creates the Network Device Interface and Registers it with
  * the O.S.  Allocates the ifnet, sets capabilities (checksum offload,
  * TSO4, jumbo, VLAN tagging/TSO, LRO), attaches ethernet and sets up
  * ifmedia.
  */
 
 static void
 qla_init_ifnet(device_t dev, qla_host_t *ha)
 {
 	struct ifnet *ifp;
 
 	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
 
 	ifp = ha->ifp = if_alloc(IFT_ETHER);
 
 	if (ifp == NULL)
 		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
 
 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 
 	ifp->if_baudrate = IF_Gbps(10);
 	ifp->if_capabilities = IFCAP_LINKSTATE;
 	ifp->if_mtu = ETHERMTU;
 
 	ifp->if_init = qla_init;
 	ifp->if_softc = ha;
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 	ifp->if_ioctl = qla_ioctl;
 
 	/* multiqueue transmit entry points */
 	ifp->if_transmit = qla_transmit;
 	ifp->if_qflush = qla_qflush;
 
 	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
 	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
 	IFQ_SET_READY(&ifp->if_snd);
 
 	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 
 	ether_ifattach(ifp, qla_get_mac_addr(ha));
 
 	/* offload capabilities; all enabled by default */
 	ifp->if_capabilities |= IFCAP_HWCSUM |
 				IFCAP_TSO4 |
 				IFCAP_JUMBO_MTU |
 				IFCAP_VLAN_HWTAGGING |
 				IFCAP_VLAN_MTU |
 				IFCAP_VLAN_HWTSO |
 				IFCAP_LRO;
 
 	ifp->if_capenable = ifp->if_capabilities;
 
 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
 
 	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
 
 	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
 		NULL);
 	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
 
 	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
 
 	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
 
 	return;
 }
 
 /*
  * (Re)initialize the interface; caller must hold QLA_LOCK.
  * Stops the interface, (re)allocates transmit and receive buffers,
  * then brings up the hardware interface.  On success marks the ifnet
  * RUNNING and un-pauses the watchdog.
  * NOTE(review): early returns on buffer-allocation failure leave the
  * interface stopped without reporting an error to the caller.
  */
 static void
 qla_init_locked(qla_host_t *ha)
 {
 	struct ifnet *ifp = ha->ifp;
 
 	qla_stop(ha);
 
 	if (qla_alloc_xmt_bufs(ha) != 0) 
 		return;
 
 	qla_confirm_9kb_enable(ha);
 
 	if (qla_alloc_rcv_bufs(ha) != 0)
 		return;
 
 	/* adopt the (possibly user-changed) link-level address */
 	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
 
 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
 
 	ha->flags.stop_rcv = 0;
  	if (ql_init_hw_if(ha) == 0) {
 		ifp = ha->ifp;
 		ifp->if_drv_flags |= IFF_DRV_RUNNING;
 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 		ha->flags.qla_watchdog_pause = 0;
 		ha->hw_vlan_tx_frames = 0;
 		ha->tx_tso_frames = 0;
 		ha->flags.qla_interface_up = 1;
 	}
 
 	return;
 }
 
 /*
  * if_init entry point: (re)initializes the interface under the driver
  * lock by delegating to qla_init_locked().
  */
 static void
 qla_init(void *arg)
 {
 	qla_host_t *ha = (qla_host_t *)arg;
 
 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
 
 	QLA_LOCK(ha);
 	qla_init_locked(ha);
 	QLA_UNLOCK(ha);
 
 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
 }
 
 /*
  * Program the multicast filter.  Snapshots up to
  * Q8_MAX_NUM_MULTICAST_ADDRS link-level addresses from the ifnet's
  * multicast list (under the maddr rlock), then pushes them to the
  * hardware under QLA_LOCK.  add_multi selects add (1) vs delete (0).
  * Returns 0 on success or the ql_hw_set_multi() error.
  */
 static int
 qla_set_multi(qla_host_t *ha, uint32_t add_multi)
 {
 	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
 	struct ifmultiaddr *ifma;
 	int mcnt = 0;
 	struct ifnet *ifp = ha->ifp;
 	int ret = 0;
 
 	if_maddr_rlock(ifp);
 
 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 
 		if (ifma->ifma_addr->sa_family != AF_LINK)
 			continue;
 
 		/* silently drop addresses beyond the hardware limit */
 		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
 			break;
 
 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
 			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
 
 		mcnt++;
 	}
 
 	if_maddr_runlock(ifp);
 
 	QLA_LOCK(ha);
 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
 	}
 	QLA_UNLOCK(ha);
 
 	return (ret);
 }
 
 /*
  * ioctl entry point for the interface.  Handles address/MTU/flags/
  * multicast/media/capability requests; everything else is forwarded
  * to ether_ioctl().
  */
 static int
 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 {
 	int ret = 0;
 	struct ifreq *ifr = (struct ifreq *)data;
 	struct ifaddr *ifa = (struct ifaddr *)data;
 	qla_host_t *ha;
 
 	ha = (qla_host_t *)ifp->if_softc;
 
 	switch (cmd) {
 	case SIOCSIFADDR:
 		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
 			__func__, cmd));
 
 		if (ifa->ifa_addr->sa_family == AF_INET) {
 			/* bring the interface up before ARP announcement */
 			ifp->if_flags |= IFF_UP;
 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 				QLA_LOCK(ha);
 				qla_init_locked(ha);
 				QLA_UNLOCK(ha);
 			}
 			QL_DPRINT4(ha, (ha->pci_dev,
 				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
 				__func__, cmd,
 				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
 
 			arp_ifinit(ifp, ifa);
 		} else {
 			ether_ioctl(ifp, cmd, data);
 		}
 		break;
 
 	case SIOCSIFMTU:
 		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
 			__func__, cmd));
 
 		if (ifr->ifr_mtu > QLA_MAX_MTU) {
 			ret = EINVAL;
 		} else {
 			QLA_LOCK(ha);
 
 			ifp->if_mtu = ifr->ifr_mtu;
 			ha->max_frame_size =
 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 
 			/* re-init so rx buffers match the new frame size */
 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 				qla_init_locked(ha);
 			}
 
 			if (ifp->if_mtu > ETHERMTU)
 				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
 			else
 				ha->std_replenish = QL_STD_REPLENISH_THRES;
 				
 
 			QLA_UNLOCK(ha);
 
 			/* NOTE(review): ret is still 0 here, so this check
 			 * is dead code -- qla_init_locked() returns void and
 			 * nothing sets ret in this branch. */
 			if (ret)
 				ret = EINVAL;
 		}
 
 		break;
 
 	case SIOCSIFFLAGS:
 		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
 			__func__, cmd));
 
 		QLA_LOCK(ha);
 
 		if (ifp->if_flags & IFF_UP) {
 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 				/* only re-program promisc/allmulti deltas */
 				if ((ifp->if_flags ^ ha->if_flags) &
 					IFF_PROMISC) {
 					ret = ql_set_promisc(ha);
 				} else if ((ifp->if_flags ^ ha->if_flags) &
 					IFF_ALLMULTI) {
 					ret = ql_set_allmulti(ha);
 				}
 			} else {
 				ha->max_frame_size = ifp->if_mtu +
 					ETHER_HDR_LEN + ETHER_CRC_LEN;
 				qla_init_locked(ha);
 			}
 		} else {
 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 				qla_stop(ha);
 			ha->if_flags = ifp->if_flags;
 		}
 
 		QLA_UNLOCK(ha);
 		break;
 
 	case SIOCADDMULTI:
 		QL_DPRINT4(ha, (ha->pci_dev,
 			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
 
 		if (qla_set_multi(ha, 1))
 			ret = EINVAL;
 		break;
 
 	case SIOCDELMULTI:
 		QL_DPRINT4(ha, (ha->pci_dev,
 			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
 
 		if (qla_set_multi(ha, 0))
 			ret = EINVAL;
 		break;
 
 	case SIOCSIFMEDIA:
 	case SIOCGIFMEDIA:
 		QL_DPRINT4(ha, (ha->pci_dev,
 			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
 			__func__, cmd));
 		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
 		break;
 
 	case SIOCSIFCAP:
 	{
 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 
 		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
 			__func__, cmd));
 
 		if (mask & IFCAP_HWCSUM)
 			ifp->if_capenable ^= IFCAP_HWCSUM;
 		if (mask & IFCAP_TSO4)
 			ifp->if_capenable ^= IFCAP_TSO4;
 		if (mask & IFCAP_VLAN_HWTAGGING)
 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 		if (mask & IFCAP_VLAN_HWTSO)
 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 		if (mask & IFCAP_LRO)
 			ifp->if_capenable ^= IFCAP_LRO;
 
 		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 			qla_init(ha);
 
 		VLAN_CAPABILITIES(ifp);
 		break;
 	}
 
 	default:
 		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
 			__func__, cmd));
 		ret = ether_ioctl(ifp, cmd, data);
 		break;
 	}
 
 	return (ret);
 }
 
 /*
  * ifmedia change handler: only IFM_ETHER media is supported; any other
  * media type is rejected with EINVAL.
  */
 static int
 qla_media_change(struct ifnet *ifp)
 {
 	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
 	int rc;
 
 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
 
 	rc = (IFM_TYPE(ha->media.ifm_media) != IFM_ETHER) ? EINVAL : 0;
 
 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
 
 	return (rc);
 }
 
 /*
  * ifmedia status handler: refreshes the hardware link state and reports
  * active/valid status plus full-duplex and the detected optics when the
  * link is up.
  */
 static void
 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 {
 	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
 
 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
 
 	ifmr->ifm_status = IFM_AVALID;
 	ifmr->ifm_active = IFM_ETHER;
 
 	ql_update_link_state(ha);
 
 	if (ha->hw.link_up) {
 		ifmr->ifm_status |= IFM_ACTIVE;
 		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
 	}
 
 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
 		(ha->hw.link_up ? "link_up" : "link_down")));
 }
 
 
 /*
  * DMA-map and hand one mbuf chain to the hardware on tx ring txr_idx.
  * If the chain has too many segments (EFBIG) it is defragmented once
  * and retried.  On success the mbuf is recorded in the tx ring so the
  * completion path can free it.  Ownership: on any error other than
  * ENOMEM the mbuf is freed and *m_headp is set to NULL; on ENOMEM the
  * caller keeps the mbuf and may retry.
  */
 static int
 qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
 	uint32_t iscsi_pdu)
 {
 	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
 	bus_dmamap_t		map;
 	int			nsegs;
 	int			ret = -1;
 	uint32_t		tx_idx;
 	struct mbuf		*m_head = *m_headp;
 
 	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
 
 	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
 	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
 
 	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
 			BUS_DMA_NOWAIT);
 
 	if (ret == EFBIG) {
 
 		/* too many segments: defragment into fewer mbufs and retry */
 		struct mbuf *m;
 
 		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
 			m_head->m_pkthdr.len));
 
 		m = m_defrag(m_head, M_NOWAIT);
 		if (m == NULL) {
 			ha->err_tx_defrag++;
 			m_freem(m_head);
 			*m_headp = NULL;
 			device_printf(ha->pci_dev,
 				"%s: m_defrag() = NULL [%d]\n",
 				__func__, ret);
 			return (ENOBUFS);
 		}
 		m_head = m;
 		*m_headp = m_head;
 
 		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
 					segs, &nsegs, BUS_DMA_NOWAIT))) {
 
 			ha->err_tx_dmamap_load++;
 
 			device_printf(ha->pci_dev,
 				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
 				__func__, ret, m_head->m_pkthdr.len);
 
 			/* ENOMEM is retryable; keep the mbuf for the caller */
 			if (ret != ENOMEM) {
 				m_freem(m_head);
 				*m_headp = NULL;
 			}
 			return (ret);
 		}
 
 	} else if (ret) {
 
 		ha->err_tx_dmamap_load++;
 
 		device_printf(ha->pci_dev,
 			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
 			__func__, ret, m_head->m_pkthdr.len);
 
 		/* ENOMEM is retryable; keep the mbuf for the caller */
 		if (ret != ENOMEM) {
 			m_freem(m_head);
 			*m_headp = NULL;
 		}
 		return (ret);
 	}
 
 	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));
 
 	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
 
         if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
 				iscsi_pdu))) {
 		/* hardware accepted the frame; remember the mbuf for
 		 * completion-time free */
 		ha->tx_ring[txr_idx].count++;
 		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
 	} else {
 		if (ret == EINVAL) {
 			if (m_head)
 				m_freem(m_head);
 			*m_headp = NULL;
 		}
 	}
 
 	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
 	return (ret);
 }
 
 static int
 qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
 {
         snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                 "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);
 
         mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
 
         fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
                                    M_NOWAIT, &fp->tx_mtx);
         if (fp->tx_br == NULL) {
             QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
                 " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
             return (-ENOMEM);
         }
         return 0;
 }
 
 static void
 qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
 {
         struct mbuf *mp;
         struct ifnet *ifp = ha->ifp;
 
         if (mtx_initialized(&fp->tx_mtx)) {
 
                 if (fp->tx_br != NULL) {
 
                         mtx_lock(&fp->tx_mtx);
 
                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                 m_freem(mp);
                         }
 
                         mtx_unlock(&fp->tx_mtx);
 
                         buf_ring_free(fp->tx_br, M_DEVBUF);
                         fp->tx_br = NULL;
                 }
                 mtx_destroy(&fp->tx_mtx);
         }
         return;
 }
 
 static void
 qla_fp_taskqueue(void *context, int pending)
 {
         qla_tx_fp_t *fp;
         qla_host_t *ha;
         struct ifnet *ifp;
         struct mbuf  *mp;
         int ret;
 	uint32_t txr_idx;
 	uint32_t iscsi_pdu = 0;
 	uint32_t rx_pkts_left;
 
         fp = context;
 
         if (fp == NULL)
                 return;
 
         ha = (qla_host_t *)fp->ha;
 
         ifp = ha->ifp;
 
 	txr_idx = fp->txr_idx;
 
         mtx_lock(&fp->tx_mtx);
 
         if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                 IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
                 mtx_unlock(&fp->tx_mtx);
                 goto qla_fp_taskqueue_exit;
         }
 
 	rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);
 
 #ifdef QL_ENABLE_ISCSI_TLV
 	ql_hw_tx_done_locked(ha, fp->txr_idx);
 	ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
 	txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1);
 #else
 	ql_hw_tx_done_locked(ha, fp->txr_idx);
 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
 
         mp = drbr_peek(ifp, fp->tx_br);
 
         while (mp != NULL) {
 
 		if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
 #ifdef QL_ENABLE_ISCSI_TLV
 			if (ql_iscsi_pdu(ha, mp) == 0) {
 				iscsi_pdu = 1;
 			}
 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
 		}
 
 		ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);
 
                 if (ret) {
                         if (mp != NULL)
                                 drbr_putback(ifp, fp->tx_br, mp);
                         else {
                                 drbr_advance(ifp, fp->tx_br);
                         }
 
                         mtx_unlock(&fp->tx_mtx);
 
                         goto qla_fp_taskqueue_exit0;
                 } else {
                         drbr_advance(ifp, fp->tx_br);
                 }
 
                 mp = drbr_peek(ifp, fp->tx_br);
         }
 
         mtx_unlock(&fp->tx_mtx);
 
 qla_fp_taskqueue_exit0:
 
 	if (rx_pkts_left || ((mp != NULL) && ret)) {
 		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
 	} else {
 		if (!ha->flags.stop_rcv) {
 			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
 		}
 	}
 
 qla_fp_taskqueue_exit:
 
         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
         return;
 }
 
 static int
 qla_create_fp_taskqueues(qla_host_t *ha)
 {
         int     i;
         uint8_t tq_name[32];
 
         for (i = 0; i < ha->hw.num_sds_rings; i++) {
 
                 qla_tx_fp_t *fp = &ha->tx_fp[i];
 
                 bzero(tq_name, sizeof (tq_name));
                 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
 
                 TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
 
                 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
                                         taskqueue_thread_enqueue,
                                         &fp->fp_taskqueue);
 
                 if (fp->fp_taskqueue == NULL)
                         return (-1);
 
                 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
                         tq_name);
 
                 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
                         fp->fp_taskqueue));
         }
 
         return (0);
 }
 
 static void
 qla_destroy_fp_taskqueues(qla_host_t *ha)
 {
         int     i;
 
         for (i = 0; i < ha->hw.num_sds_rings; i++) {
 
                 qla_tx_fp_t *fp = &ha->tx_fp[i];
 
                 if (fp->fp_taskqueue != NULL) {
                         taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
                         taskqueue_free(fp->fp_taskqueue);
                         fp->fp_taskqueue = NULL;
                 }
         }
         return;
 }
 
 static void
 qla_drain_fp_taskqueues(qla_host_t *ha)
 {
         int     i;
 
         for (i = 0; i < ha->hw.num_sds_rings; i++) {
                 qla_tx_fp_t *fp = &ha->tx_fp[i];
 
                 if (fp->fp_taskqueue != NULL) {
                         taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
                 }
         }
         return;
 }
 
 static int
 qla_transmit(struct ifnet *ifp, struct mbuf  *mp)
 {
 	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
         qla_tx_fp_t *fp;
         int rss_id = 0;
         int ret = 0;
 
         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
 
 #if __FreeBSD_version >= 1100000
         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
 #else
         if (mp->m_flags & M_FLOWID)
 #endif
                 rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
                                         ha->hw.num_sds_rings;
         fp = &ha->tx_fp[rss_id];
 
         if (fp->tx_br == NULL) {
                 ret = EINVAL;
                 goto qla_transmit_exit;
         }
 
         if (mp != NULL) {
                 ret = drbr_enqueue(ifp, fp->tx_br, mp);
         }
 
         if (fp->fp_taskqueue != NULL)
                 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
 
         ret = 0;
 
 qla_transmit_exit:
 
         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
         return ret;
 }
 
 static void
 qla_qflush(struct ifnet *ifp)
 {
         int                     i;
         qla_tx_fp_t		*fp;
         struct mbuf             *mp;
         qla_host_t              *ha;
 
         ha = (qla_host_t *)ifp->if_softc;
 
         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
 
         for (i = 0; i < ha->hw.num_sds_rings; i++) {
 
                 fp = &ha->tx_fp[i];
 
                 if (fp == NULL)
                         continue;
 
                 if (fp->tx_br) {
                         mtx_lock(&fp->tx_mtx);
 
                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                 m_freem(mp);
                         }
                         mtx_unlock(&fp->tx_mtx);
                 }
         }
         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
 
         return;
 }
 
/*
 * qla_stop
 *	Bring the interface down: clear the running flags, let in-flight
 *	fast path transmitters finish, pause the watchdog, drain the fast
 *	path taskqueues, remove the hardware interface and release all
 *	transmit/receive buffers.  Entered with QLA_LOCK held (the body
 *	drops and re-takes it around sleeps).
 */
static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;
	int i = 0;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	/*
	 * Lock/unlock each fast path tx mutex so any sender that saw
	 * IFF_DRV_RUNNING before it was cleared has left the tx path.
	 */
        for (i = 0; i < ha->hw.num_sds_rings; i++) {
        	qla_tx_fp_t *fp;

		fp = &ha->tx_fp[i];

                if (fp == NULL)
                        continue;

		if (fp->tx_br != NULL) {
                        mtx_lock(&fp->tx_mtx);
                        mtx_unlock(&fp->tx_mtx);
		}
	}

	ha->flags.qla_watchdog_pause = 1;

	/* Drop the lock while polling so the watchdog can observe it. */
	while (!ha->qla_watchdog_paused) {
		QLA_UNLOCK(ha);
		qla_mdelay(__func__, 1);
		QLA_LOCK(ha);
	}

	ha->flags.qla_interface_up = 0;

	/* taskqueue_drain() may sleep; cannot hold the driver lock. */
	QLA_UNLOCK(ha);
	qla_drain_fp_taskqueues(ha);
	QLA_LOCK(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}
 
 /*
  * Buffer Management Functions for Transmit and Receive Rings
  */
/*
 * qla_alloc_xmt_bufs
 *	Create the transmit dma tag and one dma map per transmit
 *	descriptor in every transmit ring.  Returns 0 on success or an
 *	errno; on map-creation failure everything created so far is
 *	released via qla_free_xmt_bufs().
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	/* Start from a clean software state for every ring. */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {

				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				/* Unwind: zeroed (uncreated) maps are skipped. */
				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return 0;
}
 
 /*
  * Release mbuf after it sent on the wire
  */
 static void
 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
 {
 	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
 
 	if (txb->m_head && txb->map) {
 
 		bus_dmamap_unload(ha->tx_tag, txb->map);
 
 		m_freem(txb->m_head);
 		txb->m_head = NULL;
 	}
 
 	if (txb->map)
 		bus_dmamap_destroy(ha->tx_tag, txb->map);
 
 	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
 }
 
 static void
 qla_free_xmt_bufs(qla_host_t *ha)
 {
 	int		i, j;
 
 	for (j = 0; j < ha->hw.num_tx_rings; j++) {
 		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
 			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
 	}
 
 	if (ha->tx_tag != NULL) {
 		bus_dma_tag_destroy(ha->tx_tag);
 		ha->tx_tag = NULL;
 	}
 
 	for (i = 0; i < ha->hw.num_tx_rings; i++) {
 		bzero((void *)ha->tx_ring[i].tx_buf,
 			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
 	}
 	return;
 }
 
 
/*
 * qla_alloc_rcv_std
 *	Create a dma map for every receive descriptor in every receive
 *	ring, then populate each descriptor with an mbuf cluster via
 *	ql_get_mbuf() and program its physical address into the hardware
 *	receive ring.  Returns 0 on success, -1 on failure (with all maps
 *	created so far destroyed).
 */
static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	/* Pass 1: create all dma maps up front so failure unwinds cleanly. */
	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				/* Destroy the maps of all fully-built rings. */
				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				/* Then the maps created in this ring so far. */
				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);

	
	/* Pass 2: attach an mbuf to each descriptor and program the hw. */
	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
			 	 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba 
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return 0;

qla_alloc_rcv_std_err:
	return (-1);
}
 
 static void
 qla_free_rcv_std(qla_host_t *ha)
 {
 	int		i, r;
 	qla_rx_buf_t	*rxb;
 
 	for (r = 0; r < ha->hw.num_rds_rings; r++) {
 		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
 			rxb = &ha->rx_ring[r].rx_buf[i];
 			if (rxb->m_head != NULL) {
 				bus_dmamap_unload(ha->rx_tag, rxb->map);
 				bus_dmamap_destroy(ha->rx_tag, rxb->map);
 				m_freem(rxb->m_head);
 				rxb->m_head = NULL;
 			}
 		}
 	}
 	return;
 }
 
 static int
 qla_alloc_rcv_bufs(qla_host_t *ha)
 {
 	int		i, ret = 0;
 
 	if (bus_dma_tag_create(NULL,    /* parent */
 			1, 0,    /* alignment, bounds */
 			BUS_SPACE_MAXADDR,       /* lowaddr */
 			BUS_SPACE_MAXADDR,       /* highaddr */
 			NULL, NULL,      /* filter, filterarg */
 			MJUM9BYTES,     /* maxsize */
 			1,        /* nsegments */
 			MJUM9BYTES,        /* maxsegsize */
 			BUS_DMA_ALLOCNOW,        /* flags */
 			NULL,    /* lockfunc */
 			NULL,    /* lockfuncarg */
 			&ha->rx_tag)) {
 
 		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
 			__func__);
 
 		return (ENOMEM);
 	}
 
 	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
 
 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
 		ha->hw.sds[i].sdsr_next = 0;
 		ha->hw.sds[i].rxb_free = NULL;
 		ha->hw.sds[i].rx_free = 0;
 	}
 
 	ret = qla_alloc_rcv_std(ha);
 
 	return (ret);
 }
 
 static void
 qla_free_rcv_bufs(qla_host_t *ha)
 {
 	int		i;
 
 	qla_free_rcv_std(ha);
 
 	if (ha->rx_tag != NULL) {
 		bus_dma_tag_destroy(ha->rx_tag);
 		ha->rx_tag = NULL;
 	}
 
 	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
 
 	for (i = 0; i < ha->hw.num_sds_rings; i++) {
 		ha->hw.sds[i].sdsr_next = 0;
 		ha->hw.sds[i].rxb_free = NULL;
 		ha->hw.sds[i].rx_free = 0;
 	}
 
 	return;
 }
 
/*
 * ql_get_mbuf
 *	Attach an mbuf cluster to receive buffer rxb and DMA-map it.
 *	If nmp is NULL a fresh cluster is allocated (9K when enable_9kb,
 *	else a standard cluster); otherwise nmp is reset and reused.
 *	The buffer is 8-byte aligned before mapping.  On success rxb->m_head
 *	and rxb->paddr are set and 0 is returned; otherwise nonzero.
 */
int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	struct mbuf *mp = nmp;
	struct ifnet   		*ifp;
	int            		ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs, mbuf_size;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

        if (ha->hw.enable_9kb)
                mbuf_size = MJUM9BYTES;
        else
                mbuf_size = MCLBYTES;

	if (mp == NULL) {

		/* Error-injection hook for testing the allocation path. */
		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
			return(-1);

                if (ha->hw.enable_9kb)
                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
                else
                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			ha->err_m_getcl++;
			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
			goto exit_ql_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
	} else {
		/* Recycle the caller's mbuf: reset data pointer and length. */
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* Align the data start to an 8-byte boundary for the hardware. */
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
                ret = -1;
		goto exit_ql_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_ql_get_mbuf:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}
 
 
 static void
 qla_get_peer(qla_host_t *ha)
 {
 	device_t *peers;
 	int count, i, slot;
 	int my_slot = pci_get_slot(ha->pci_dev);
 
 	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
 		return;
 
 	for (i = 0; i < count; i++) {
 		slot = pci_get_slot(peers[i]);
 
 		if ((slot >= 0) && (slot == my_slot) &&
 			(pci_get_device(peers[i]) ==
 				pci_get_device(ha->pci_dev))) {
 			if (ha->pci_dev != peers[i]) 
 				ha->peer_dev = peers[i];
 		}
 	}
 }
 
 static void
 qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
 {
 	qla_host_t *ha_peer;
 	
 	if (ha->peer_dev) {
         	if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
 
 			ha_peer->msg_from_peer = msg_to_peer;
 		}
 	}
 }
 
/*
 * qla_error_recovery
 *	Error recovery task: quiesce the interface, coordinate a chip
 *	reset with the peer function via the msg_from_peer handshake
 *	(the even-numbered function drives the reset and takes the
 *	minidump), re-initialize the hardware, and bring the interface
 *	back up if it was up before the error.
 */
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 100;	/* handshake timeout: 100 x 100ms */
	struct ifnet *ifp = ha->ifp;
	int i = 0;

        QLA_LOCK(ha);

	if (ha->flags.qla_interface_up) {

		ha->hw.imd_compl = 1;

		/* Give in-flight operations time to complete. */
		QLA_UNLOCK(ha);
		qla_mdelay(__func__, 300);
		QLA_LOCK(ha);

	        ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

		/*
		 * Lock/unlock each fast path tx mutex so senders that saw
		 * IFF_DRV_RUNNING before it was cleared have drained out.
		 */
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
	        	qla_tx_fp_t *fp;

			fp = &ha->tx_fp[i];

			if (fp == NULL)
				continue;

			if (fp->tx_br != NULL) {
				mtx_lock(&fp->tx_mtx);
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

        QLA_UNLOCK(ha);

	qla_drain_fp_taskqueues(ha);

	if ((ha->pci_func & 0x1) == 0) {
		/* Even function: initiate the reset and take the minidump. */

		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			/* Wait (up to ~10s) for the peer to acknowledge. */
			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

        	QLA_LOCK(ha);

		ql_minidump(ha);

        	QLA_UNLOCK(ha);

		(void) ql_init_hw(ha);

        	QLA_LOCK(ha);

		if (ha->flags.qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}

        	QLA_UNLOCK(ha);

		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		/* Odd function: acknowledge (or request) the reset. */
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK)  && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);

        	QLA_LOCK(ha);

		if (ha->flags.qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}

        	QLA_UNLOCK(ha);
	}

        QLA_LOCK(ha);

	/* Re-allocate buffers and restart the interface if it was up. */
	if (ha->flags.qla_interface_up) {

		if (qla_alloc_xmt_bufs(ha) != 0) {
			QLA_UNLOCK(ha);
			return;
		}
		qla_confirm_9kb_enable(ha);

		if (qla_alloc_rcv_bufs(ha) != 0) {
			QLA_UNLOCK(ha);
			return;
		}

		ha->flags.stop_rcv = 0;

		if (ql_init_hw_if(ha) == 0) {
			ifp = ha->ifp;
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			ha->flags.qla_watchdog_pause = 0;
		}
	} else
		ha->flags.qla_watchdog_pause = 0;

        QLA_UNLOCK(ha);
}
 
 static void
 qla_async_event(void *context, int pending)
 {
         qla_host_t *ha = context;
 
         QLA_LOCK(ha);
         qla_hw_async_event(ha);
         QLA_UNLOCK(ha);
 }