Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/e1000/if_em.c
Show All 26 Lines | /****************************************************************************** | ||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||||
POSSIBILITY OF SUCH DAMAGE. | POSSIBILITY OF SUCH DAMAGE. | ||||
******************************************************************************/ | ******************************************************************************/ | ||||
/*$FreeBSD$*/ | /*$FreeBSD$*/ | ||||
#include "opt_em.h" | |||||
#include "opt_ddb.h" | |||||
#include "opt_inet.h" | #include "opt_inet.h" | ||||
#include "opt_inet6.h" | #include "opt_inet6.h" | ||||
#ifdef HAVE_KERNEL_OPTION_HEADERS | #ifdef HAVE_KERNEL_OPTION_HEADERS | ||||
#include "opt_device_polling.h" | #include "opt_device_polling.h" | ||||
#endif | #endif | ||||
#include <sys/param.h> | #include <sys/param.h> | ||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#ifdef DDB | |||||
#include <sys/types.h> | |||||
#include <ddb/ddb.h> | |||||
#endif | |||||
#if __FreeBSD_version >= 800000 | #if __FreeBSD_version >= 800000 | ||||
#include <sys/buf_ring.h> | #include <sys/buf_ring.h> | ||||
#endif | #endif | ||||
#include <sys/bus.h> | #include <sys/bus.h> | ||||
#include <sys/endian.h> | #include <sys/endian.h> | ||||
#include <sys/kernel.h> | #include <sys/kernel.h> | ||||
#include <sys/kthread.h> | #include <sys/kthread.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/mbuf.h> | #include <sys/mbuf.h> | ||||
#include <sys/module.h> | #include <sys/module.h> | ||||
#include <sys/rman.h> | #include <sys/rman.h> | ||||
#include <sys/smp.h> | |||||
#include <sys/socket.h> | #include <sys/socket.h> | ||||
#include <sys/sockio.h> | #include <sys/sockio.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/taskqueue.h> | #include <sys/taskqueue.h> | ||||
#include <sys/eventhandler.h> | #include <sys/eventhandler.h> | ||||
#include <machine/bus.h> | #include <machine/bus.h> | ||||
#include <machine/resource.h> | #include <machine/resource.h> | ||||
▲ Show 20 Lines • Show All 140 Lines • ▼ Show 20 Lines | |||||
static int em_attach(device_t); | static int em_attach(device_t); | ||||
static int em_detach(device_t); | static int em_detach(device_t); | ||||
static int em_shutdown(device_t); | static int em_shutdown(device_t); | ||||
static int em_suspend(device_t); | static int em_suspend(device_t); | ||||
static int em_resume(device_t); | static int em_resume(device_t); | ||||
#ifdef EM_MULTIQUEUE | #ifdef EM_MULTIQUEUE | ||||
static int em_mq_start(if_t, struct mbuf *); | static int em_mq_start(if_t, struct mbuf *); | ||||
static int em_mq_start_locked(if_t, | static int em_mq_start_locked(if_t, | ||||
struct tx_ring *, struct mbuf *); | struct tx_ring *); | ||||
static void em_qflush(if_t); | static void em_qflush(if_t); | ||||
#else | #else | ||||
static void em_start(if_t); | static void em_start(if_t); | ||||
static void em_start_locked(if_t, struct tx_ring *); | static void em_start_locked(if_t, struct tx_ring *); | ||||
#endif | #endif | ||||
static int em_ioctl(if_t, u_long, caddr_t); | static int em_ioctl(if_t, u_long, caddr_t); | ||||
static uint64_t em_get_counter(if_t, ift_counter); | static uint64_t em_get_counter(if_t, ift_counter); | ||||
static void em_init(void *); | static void em_init(void *); | ||||
▲ Show 20 Lines • Show All 74 Lines • ▼ Show 20 Lines | |||||
/* MSIX handlers */ | /* MSIX handlers */ | ||||
static void em_msix_tx(void *); | static void em_msix_tx(void *); | ||||
static void em_msix_rx(void *); | static void em_msix_rx(void *); | ||||
static void em_msix_link(void *); | static void em_msix_link(void *); | ||||
static void em_handle_tx(void *context, int pending); | static void em_handle_tx(void *context, int pending); | ||||
static void em_handle_rx(void *context, int pending); | static void em_handle_rx(void *context, int pending); | ||||
static void em_handle_link(void *context, int pending); | static void em_handle_link(void *context, int pending); | ||||
#ifdef EM_MULTIQUEUE | |||||
static void em_enable_vectors_82574(struct adapter *); | |||||
#endif | |||||
static void em_set_sysctl_value(struct adapter *, const char *, | static void em_set_sysctl_value(struct adapter *, const char *, | ||||
const char *, int *, int); | const char *, int *, int); | ||||
static int em_set_flowcntl(SYSCTL_HANDLER_ARGS); | static int em_set_flowcntl(SYSCTL_HANDLER_ARGS); | ||||
static int em_sysctl_eee(SYSCTL_HANDLER_ARGS); | static int em_sysctl_eee(SYSCTL_HANDLER_ARGS); | ||||
static __inline void em_rx_discard(struct rx_ring *, int); | static __inline void em_rx_discard(struct rx_ring *, int); | ||||
#ifdef DEVICE_POLLING | #ifdef DEVICE_POLLING | ||||
▲ Show 20 Lines • Show All 73 Lines • ▼ Show 20 Lines | |||||
static int em_debug_sbp = FALSE; | static int em_debug_sbp = FALSE; | ||||
SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0, | SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0, | ||||
"Show bad packets in promiscuous mode"); | "Show bad packets in promiscuous mode"); | ||||
static int em_enable_msix = TRUE; | static int em_enable_msix = TRUE; | ||||
SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0, | SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0, | ||||
"Enable MSI-X interrupts"); | "Enable MSI-X interrupts"); | ||||
#ifdef EM_MULTIQUEUE | |||||
static int em_num_queues = 1; | |||||
SYSCTL_INT(_hw_em, OID_AUTO, num_queues, CTLFLAG_RDTUN, &em_num_queues, 0, | |||||
"82574 only: Number of queues to configure, 0 indicates autoconfigure"); | |||||
#endif | |||||
/* | |||||
** Global variable to store last used CPU when binding queues | |||||
** to CPUs in igb_allocate_msix. Starts at CPU_FIRST and increments when a | |||||
** queue is bound to a cpu. | |||||
*/ | |||||
static int em_last_bind_cpu = -1; | |||||
/* How many packets rxeof tries to clean at a time */ | /* How many packets rxeof tries to clean at a time */ | ||||
static int em_rx_process_limit = 100; | static int em_rx_process_limit = 100; | ||||
SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, | ||||
&em_rx_process_limit, 0, | &em_rx_process_limit, 0, | ||||
"Maximum number of received packets to process " | "Maximum number of received packets to process " | ||||
"at a time, -1 means unlimited"); | "at a time, -1 means unlimited"); | ||||
/* Energy efficient ethernet - default to OFF */ | /* Energy efficient ethernet - default to OFF */ | ||||
Show All 16 Lines | |||||
* | * | ||||
* return BUS_PROBE_DEFAULT on success, positive on failure | * return BUS_PROBE_DEFAULT on success, positive on failure | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static int | static int | ||||
em_probe(device_t dev) | em_probe(device_t dev) | ||||
{ | { | ||||
char adapter_name[60]; | char adapter_name[60]; | ||||
u16 pci_vendor_id = 0; | uint16_t pci_vendor_id = 0; | ||||
u16 pci_device_id = 0; | uint16_t pci_device_id = 0; | ||||
u16 pci_subvendor_id = 0; | uint16_t pci_subvendor_id = 0; | ||||
u16 pci_subdevice_id = 0; | uint16_t pci_subdevice_id = 0; | ||||
em_vendor_info_t *ent; | em_vendor_info_t *ent; | ||||
INIT_DEBUGOUT("em_probe: begin"); | INIT_DEBUGOUT("em_probe: begin"); | ||||
pci_vendor_id = pci_get_vendor(dev); | pci_vendor_id = pci_get_vendor(dev); | ||||
if (pci_vendor_id != EM_VENDOR_ID) | if (pci_vendor_id != EM_VENDOR_ID) | ||||
return (ENXIO); | return (ENXIO); | ||||
▲ Show 20 Lines • Show All 110 Lines • ▼ Show 20 Lines | em_attach(device_t dev) | ||||
/* Do Shared Code initialization */ | /* Do Shared Code initialization */ | ||||
if (e1000_setup_init_funcs(hw, TRUE)) { | if (e1000_setup_init_funcs(hw, TRUE)) { | ||||
device_printf(dev, "Setup of Shared code failed\n"); | device_printf(dev, "Setup of Shared code failed\n"); | ||||
error = ENXIO; | error = ENXIO; | ||||
goto err_pci; | goto err_pci; | ||||
} | } | ||||
/* | |||||
* Setup MSI/X or MSI if PCI Express | |||||
*/ | |||||
adapter->msix = em_setup_msix(adapter); | |||||
e1000_get_bus_info(hw); | e1000_get_bus_info(hw); | ||||
/* Set up some sysctls for the tunable interrupt delays */ | /* Set up some sysctls for the tunable interrupt delays */ | ||||
em_add_int_delay_sysctl(adapter, "rx_int_delay", | em_add_int_delay_sysctl(adapter, "rx_int_delay", | ||||
"receive interrupt delay in usecs", &adapter->rx_int_delay, | "receive interrupt delay in usecs", &adapter->rx_int_delay, | ||||
E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt); | E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt); | ||||
em_add_int_delay_sysctl(adapter, "tx_int_delay", | em_add_int_delay_sysctl(adapter, "tx_int_delay", | ||||
"transmit interrupt delay in usecs", &adapter->tx_int_delay, | "transmit interrupt delay in usecs", &adapter->tx_int_delay, | ||||
▲ Show 20 Lines • Show All 314 Lines • ▼ Show 20 Lines | em_resume(device_t dev) | ||||
em_init_manageability(adapter); | em_init_manageability(adapter); | ||||
if ((if_getflags(ifp) & IFF_UP) && | if ((if_getflags(ifp) & IFF_UP) && | ||||
(if_getdrvflags(ifp) & IFF_DRV_RUNNING) && adapter->link_active) { | (if_getdrvflags(ifp) & IFF_DRV_RUNNING) && adapter->link_active) { | ||||
for (int i = 0; i < adapter->num_queues; i++, txr++) { | for (int i = 0; i < adapter->num_queues; i++, txr++) { | ||||
EM_TX_LOCK(txr); | EM_TX_LOCK(txr); | ||||
#ifdef EM_MULTIQUEUE | #ifdef EM_MULTIQUEUE | ||||
if (!drbr_empty(ifp, txr->br)) | if (!drbr_empty(ifp, txr->br)) | ||||
em_mq_start_locked(ifp, txr, NULL); | em_mq_start_locked(ifp, txr); | ||||
#else | #else | ||||
if (!if_sendq_empty(ifp)) | if (!if_sendq_empty(ifp)) | ||||
em_start_locked(ifp, txr); | em_start_locked(ifp, txr); | ||||
#endif | #endif | ||||
EM_TX_UNLOCK(txr); | EM_TX_UNLOCK(txr); | ||||
} | } | ||||
} | } | ||||
EM_CORE_UNLOCK(adapter); | EM_CORE_UNLOCK(adapter); | ||||
return bus_generic_resume(dev); | return bus_generic_resume(dev); | ||||
} | } | ||||
#ifdef EM_MULTIQUEUE | #ifndef EM_MULTIQUEUE | ||||
static void | |||||
em_start_locked(if_t ifp, struct tx_ring *txr) | |||||
{ | |||||
struct adapter *adapter = if_getsoftc(ifp); | |||||
struct mbuf *m_head; | |||||
EM_TX_LOCK_ASSERT(txr); | |||||
if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != | |||||
IFF_DRV_RUNNING) | |||||
return; | |||||
if (!adapter->link_active) | |||||
return; | |||||
while (!if_sendq_empty(ifp)) { | |||||
/* Call cleanup if number of TX descriptors low */ | |||||
if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD) | |||||
em_txeof(txr); | |||||
if (txr->tx_avail < EM_MAX_SCATTER) { | |||||
if_setdrvflagbits(ifp,IFF_DRV_OACTIVE, 0); | |||||
break; | |||||
} | |||||
m_head = if_dequeue(ifp); | |||||
if (m_head == NULL) | |||||
break; | |||||
/* | |||||
* Encapsulation can modify our pointer, and or make it | |||||
* NULL on failure. In that event, we can't requeue. | |||||
*/ | |||||
if (em_xmit(txr, &m_head)) { | |||||
if (m_head == NULL) | |||||
break; | |||||
if_sendq_prepend(ifp, m_head); | |||||
break; | |||||
} | |||||
/* Mark the queue as having work */ | |||||
if (txr->busy == EM_TX_IDLE) | |||||
txr->busy = EM_TX_BUSY; | |||||
/* Send a copy of the frame to the BPF listener */ | |||||
ETHER_BPF_MTAP(ifp, m_head); | |||||
} | |||||
return; | |||||
} | |||||
static void | |||||
em_start(if_t ifp) | |||||
{ | |||||
struct adapter *adapter = if_getsoftc(ifp); | |||||
struct tx_ring *txr = adapter->tx_rings; | |||||
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { | |||||
EM_TX_LOCK(txr); | |||||
em_start_locked(ifp, txr); | |||||
EM_TX_UNLOCK(txr); | |||||
} | |||||
return; | |||||
} | |||||
#else /* EM_MULTIQUEUE */ | |||||
/********************************************************************* | /********************************************************************* | ||||
* Multiqueue Transmit routines | * Multiqueue Transmit routines | ||||
* | * | ||||
* em_mq_start is called by the stack to initiate a transmit. | * em_mq_start is called by the stack to initiate a transmit. | ||||
* however, if busy the driver can queue the request rather | * however, if busy the driver can queue the request rather | ||||
* than do an immediate send. It is this that is an advantage | * than do an immediate send. It is this that is an advantage | ||||
* in this driver, rather than also having multiple tx queues. | * in this driver, rather than also having multiple tx queues. | ||||
**********************************************************************/ | **********************************************************************/ | ||||
/* | |||||
** Multiqueue capable stack interface | |||||
*/ | |||||
static int | static int | ||||
em_mq_start_locked(if_t ifp, struct tx_ring *txr, struct mbuf *m) | em_mq_start(if_t ifp, struct mbuf *m) | ||||
{ | { | ||||
struct adapter *adapter = if_getsoftc(ifp); | |||||
struct tx_ring *txr = adapter->tx_rings; | |||||
unsigned int i, error; | |||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) | |||||
i = m->m_pkthdr.flowid % adapter->num_queues; | |||||
else | |||||
i = curcpu % adapter->num_queues; | |||||
txr = &adapter->tx_rings[i]; | |||||
error = drbr_enqueue(ifp, txr->br, m); | |||||
if (error) | |||||
return (error); | |||||
if (EM_TX_TRYLOCK(txr)) { | |||||
em_mq_start_locked(ifp, txr); | |||||
EM_TX_UNLOCK(txr); | |||||
} else | |||||
taskqueue_enqueue(txr->tq, &txr->tx_task); | |||||
return (0); | |||||
} | |||||
static int | |||||
em_mq_start_locked(if_t ifp, struct tx_ring *txr) | |||||
{ | |||||
struct adapter *adapter = txr->adapter; | struct adapter *adapter = txr->adapter; | ||||
struct mbuf *next; | struct mbuf *next; | ||||
int err = 0, enq = 0; | int err = 0, enq = 0; | ||||
if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != | EM_TX_LOCK_ASSERT(txr); | ||||
IFF_DRV_RUNNING || adapter->link_active == 0) { | |||||
if (m != NULL) | |||||
err = drbr_enqueue(ifp, txr->br, m); | |||||
return (err); | |||||
} | |||||
enq = 0; | if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) || | ||||
if (m != NULL) { | adapter->link_active == 0) { | ||||
err = drbr_enqueue(ifp, txr->br, m); | return (ENETDOWN); | ||||
if (err) | |||||
return (err); | |||||
} | } | ||||
/* Process the queue */ | /* Process the queue */ | ||||
while ((next = drbr_peek(ifp, txr->br)) != NULL) { | while ((next = drbr_peek(ifp, txr->br)) != NULL) { | ||||
if ((err = em_xmit(txr, &next)) != 0) { | if ((err = em_xmit(txr, &next)) != 0) { | ||||
if (next == NULL) | if (next == NULL) { | ||||
/* It was freed, move forward */ | |||||
drbr_advance(ifp, txr->br); | drbr_advance(ifp, txr->br); | ||||
else | } else { | ||||
/* | |||||
* Still have one left, it may not be | |||||
* the same since the transmit function | |||||
* may have changed it. | |||||
*/ | |||||
drbr_putback(ifp, txr->br, next); | drbr_putback(ifp, txr->br, next); | ||||
} | |||||
break; | break; | ||||
} | } | ||||
drbr_advance(ifp, txr->br); | drbr_advance(ifp, txr->br); | ||||
enq++; | enq++; | ||||
if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len); | if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len); | ||||
if (next->m_flags & M_MCAST) | if (next->m_flags & M_MCAST) | ||||
if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); | if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); | ||||
if_etherbpfmtap(ifp, next); | ETHER_BPF_MTAP(ifp, next); | ||||
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) | if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) | ||||
break; | break; | ||||
} | } | ||||
/* Mark the queue as having work */ | /* Mark the queue as having work */ | ||||
if ((enq > 0) && (txr->busy == EM_TX_IDLE)) | if ((enq > 0) && (txr->busy == EM_TX_IDLE)) | ||||
txr->busy = EM_TX_BUSY; | txr->busy = EM_TX_BUSY; | ||||
if (txr->tx_avail < EM_MAX_SCATTER) | if (txr->tx_avail < EM_MAX_SCATTER) | ||||
em_txeof(txr); | em_txeof(txr); | ||||
if (txr->tx_avail < EM_MAX_SCATTER) | if (txr->tx_avail < EM_MAX_SCATTER) { | ||||
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE,0); | if_setdrvflagbits(ifp, IFF_DRV_OACTIVE,0); | ||||
} | |||||
return (err); | return (err); | ||||
} | } | ||||
/* | /* | ||||
** Multiqueue capable stack interface | |||||
*/ | |||||
static int | |||||
em_mq_start(if_t ifp, struct mbuf *m) | |||||
{ | |||||
struct adapter *adapter = if_getsoftc(ifp); | |||||
struct tx_ring *txr = adapter->tx_rings; | |||||
int error; | |||||
if (EM_TX_TRYLOCK(txr)) { | |||||
error = em_mq_start_locked(ifp, txr, m); | |||||
EM_TX_UNLOCK(txr); | |||||
} else | |||||
error = drbr_enqueue(ifp, txr->br, m); | |||||
return (error); | |||||
} | |||||
/* | |||||
** Flush all ring buffers | ** Flush all ring buffers | ||||
*/ | */ | ||||
static void | static void | ||||
em_qflush(if_t ifp) | em_qflush(if_t ifp) | ||||
{ | { | ||||
struct adapter *adapter = if_getsoftc(ifp); | struct adapter *adapter = if_getsoftc(ifp); | ||||
struct tx_ring *txr = adapter->tx_rings; | struct tx_ring *txr = adapter->tx_rings; | ||||
struct mbuf *m; | struct mbuf *m; | ||||
for (int i = 0; i < adapter->num_queues; i++, txr++) { | for (int i = 0; i < adapter->num_queues; i++, txr++) { | ||||
EM_TX_LOCK(txr); | EM_TX_LOCK(txr); | ||||
while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) | while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) | ||||
m_freem(m); | m_freem(m); | ||||
EM_TX_UNLOCK(txr); | EM_TX_UNLOCK(txr); | ||||
} | } | ||||
if_qflush(ifp); | if_qflush(ifp); | ||||
} | } | ||||
#else /* !EM_MULTIQUEUE */ | |||||
static void | |||||
em_start_locked(if_t ifp, struct tx_ring *txr) | |||||
{ | |||||
struct adapter *adapter = if_getsoftc(ifp); | |||||
struct mbuf *m_head; | |||||
EM_TX_LOCK_ASSERT(txr); | |||||
if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != | |||||
IFF_DRV_RUNNING) | |||||
return; | |||||
if (!adapter->link_active) | |||||
return; | |||||
while (!if_sendq_empty(ifp)) { | |||||
/* Call cleanup if number of TX descriptors low */ | |||||
if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD) | |||||
em_txeof(txr); | |||||
if (txr->tx_avail < EM_MAX_SCATTER) { | |||||
if_setdrvflagbits(ifp,IFF_DRV_OACTIVE, 0); | |||||
break; | |||||
} | |||||
m_head = if_dequeue(ifp); | |||||
if (m_head == NULL) | |||||
break; | |||||
/* | |||||
* Encapsulation can modify our pointer, and or make it | |||||
* NULL on failure. In that event, we can't requeue. | |||||
*/ | |||||
if (em_xmit(txr, &m_head)) { | |||||
if (m_head == NULL) | |||||
break; | |||||
if_sendq_prepend(ifp, m_head); | |||||
break; | |||||
} | |||||
/* Mark the queue as having work */ | |||||
if (txr->busy == EM_TX_IDLE) | |||||
txr->busy = EM_TX_BUSY; | |||||
/* Send a copy of the frame to the BPF listener */ | |||||
if_etherbpfmtap(ifp, m_head); | |||||
} | |||||
return; | |||||
} | |||||
static void | |||||
em_start(if_t ifp) | |||||
{ | |||||
struct adapter *adapter = if_getsoftc(ifp); | |||||
struct tx_ring *txr = adapter->tx_rings; | |||||
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { | |||||
EM_TX_LOCK(txr); | |||||
em_start_locked(ifp, txr); | |||||
EM_TX_UNLOCK(txr); | |||||
} | |||||
return; | |||||
} | |||||
#endif /* EM_MULTIQUEUE */ | #endif /* EM_MULTIQUEUE */ | ||||
/********************************************************************* | /********************************************************************* | ||||
* Ioctl entry point | * Ioctl entry point | ||||
* | * | ||||
* em_ioctl is called when the user wants to configure the | * em_ioctl is called when the user wants to configure the | ||||
* interface. | * interface. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 379 Lines • ▼ Show 20 Lines | em_poll(if_t ifp, enum poll_cmd cmd, int count) | ||||
EM_CORE_UNLOCK(adapter); | EM_CORE_UNLOCK(adapter); | ||||
em_rxeof(rxr, count, &rx_done); | em_rxeof(rxr, count, &rx_done); | ||||
EM_TX_LOCK(txr); | EM_TX_LOCK(txr); | ||||
em_txeof(txr); | em_txeof(txr); | ||||
#ifdef EM_MULTIQUEUE | #ifdef EM_MULTIQUEUE | ||||
if (!drbr_empty(ifp, txr->br)) | if (!drbr_empty(ifp, txr->br)) | ||||
em_mq_start_locked(ifp, txr, NULL); | em_mq_start_locked(ifp, txr); | ||||
#else | #else | ||||
if (!if_sendq_empty(ifp)) | if (!if_sendq_empty(ifp)) | ||||
em_start_locked(ifp, txr); | em_start_locked(ifp, txr); | ||||
#endif | #endif | ||||
EM_TX_UNLOCK(txr); | EM_TX_UNLOCK(txr); | ||||
return (rx_done); | return (rx_done); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | |||||
static void | static void | ||||
em_handle_que(void *context, int pending) | em_handle_que(void *context, int pending) | ||||
{ | { | ||||
struct adapter *adapter = context; | struct adapter *adapter = context; | ||||
if_t ifp = adapter->ifp; | if_t ifp = adapter->ifp; | ||||
struct tx_ring *txr = adapter->tx_rings; | struct tx_ring *txr = adapter->tx_rings; | ||||
struct rx_ring *rxr = adapter->rx_rings; | struct rx_ring *rxr = adapter->rx_rings; | ||||
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { | if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { | ||||
bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL); | bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL); | ||||
EM_TX_LOCK(txr); | EM_TX_LOCK(txr); | ||||
em_txeof(txr); | em_txeof(txr); | ||||
#ifdef EM_MULTIQUEUE | #ifdef EM_MULTIQUEUE | ||||
if (!drbr_empty(ifp, txr->br)) | if (!drbr_empty(ifp, txr->br)) | ||||
em_mq_start_locked(ifp, txr, NULL); | em_mq_start_locked(ifp, txr); | ||||
#else | #else | ||||
if (!if_sendq_empty(ifp)) | if (!if_sendq_empty(ifp)) | ||||
em_start_locked(ifp, txr); | em_start_locked(ifp, txr); | ||||
#endif | #endif | ||||
EM_TX_UNLOCK(txr); | EM_TX_UNLOCK(txr); | ||||
if (more) { | if (more) { | ||||
taskqueue_enqueue(adapter->tq, &adapter->que_task); | taskqueue_enqueue(adapter->tq, &adapter->que_task); | ||||
return; | return; | ||||
Show All 17 Lines | em_msix_tx(void *arg) | ||||
struct adapter *adapter = txr->adapter; | struct adapter *adapter = txr->adapter; | ||||
if_t ifp = adapter->ifp; | if_t ifp = adapter->ifp; | ||||
++txr->tx_irq; | ++txr->tx_irq; | ||||
EM_TX_LOCK(txr); | EM_TX_LOCK(txr); | ||||
em_txeof(txr); | em_txeof(txr); | ||||
#ifdef EM_MULTIQUEUE | #ifdef EM_MULTIQUEUE | ||||
if (!drbr_empty(ifp, txr->br)) | if (!drbr_empty(ifp, txr->br)) | ||||
em_mq_start_locked(ifp, txr, NULL); | em_mq_start_locked(ifp, txr); | ||||
#else | #else | ||||
if (!if_sendq_empty(ifp)) | if (!if_sendq_empty(ifp)) | ||||
em_start_locked(ifp, txr); | em_start_locked(ifp, txr); | ||||
#endif | #endif | ||||
/* Reenable this interrupt */ | /* Reenable this interrupt */ | ||||
E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); | E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); | ||||
EM_TX_UNLOCK(txr); | EM_TX_UNLOCK(txr); | ||||
return; | return; | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
Show All 9 Lines | em_msix_rx(void *arg) | ||||
bool more; | bool more; | ||||
++rxr->rx_irq; | ++rxr->rx_irq; | ||||
if (!(if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING)) | if (!(if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING)) | ||||
return; | return; | ||||
more = em_rxeof(rxr, adapter->rx_process_limit, NULL); | more = em_rxeof(rxr, adapter->rx_process_limit, NULL); | ||||
if (more) | if (more) | ||||
taskqueue_enqueue(rxr->tq, &rxr->rx_task); | taskqueue_enqueue(rxr->tq, &rxr->rx_task); | ||||
else | else { | ||||
/* Reenable this interrupt */ | /* Reenable this interrupt */ | ||||
E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); | E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); | ||||
} | |||||
return; | return; | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* MSIX Link Fast Interrupt Service routine | * MSIX Link Fast Interrupt Service routine | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
Show All 10 Lines | if (reg_icr & E1000_ICR_RXO) | ||||
adapter->rx_overruns++; | adapter->rx_overruns++; | ||||
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { | if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { | ||||
adapter->hw.mac.get_link_status = 1; | adapter->hw.mac.get_link_status = 1; | ||||
em_handle_link(adapter, 0); | em_handle_link(adapter, 0); | ||||
} else | } else | ||||
E1000_WRITE_REG(&adapter->hw, E1000_IMS, | E1000_WRITE_REG(&adapter->hw, E1000_IMS, | ||||
EM_MSIX_LINK | E1000_IMS_LSC); | EM_MSIX_LINK | E1000_IMS_LSC); | ||||
/* | |||||
** Because we must read the ICR for this interrupt | |||||
** it may clear other causes using autoclear, for | |||||
** this reason we simply create a soft interrupt | |||||
** for all these vectors. | |||||
*/ | |||||
if (reg_icr) { | |||||
E1000_WRITE_REG(&adapter->hw, | |||||
E1000_ICS, adapter->ims); | |||||
} | |||||
return; | return; | ||||
} | } | ||||
static void | static void | ||||
em_handle_rx(void *context, int pending) | em_handle_rx(void *context, int pending) | ||||
{ | { | ||||
struct rx_ring *rxr = context; | struct rx_ring *rxr = context; | ||||
struct adapter *adapter = rxr->adapter; | struct adapter *adapter = rxr->adapter; | ||||
bool more; | bool more; | ||||
more = em_rxeof(rxr, adapter->rx_process_limit, NULL); | more = em_rxeof(rxr, adapter->rx_process_limit, NULL); | ||||
if (more) | if (more) | ||||
taskqueue_enqueue(rxr->tq, &rxr->rx_task); | taskqueue_enqueue(rxr->tq, &rxr->rx_task); | ||||
else | else { | ||||
/* Reenable this interrupt */ | /* Reenable this interrupt */ | ||||
E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); | E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); | ||||
} | } | ||||
} | |||||
static void | static void | ||||
em_handle_tx(void *context, int pending) | em_handle_tx(void *context, int pending) | ||||
{ | { | ||||
struct tx_ring *txr = context; | struct tx_ring *txr = context; | ||||
struct adapter *adapter = txr->adapter; | struct adapter *adapter = txr->adapter; | ||||
if_t ifp = adapter->ifp; | if_t ifp = adapter->ifp; | ||||
EM_TX_LOCK(txr); | EM_TX_LOCK(txr); | ||||
em_txeof(txr); | em_txeof(txr); | ||||
#ifdef EM_MULTIQUEUE | #ifdef EM_MULTIQUEUE | ||||
if (!drbr_empty(ifp, txr->br)) | if (!drbr_empty(ifp, txr->br)) | ||||
em_mq_start_locked(ifp, txr, NULL); | em_mq_start_locked(ifp, txr); | ||||
#else | #else | ||||
if (!if_sendq_empty(ifp)) | if (!if_sendq_empty(ifp)) | ||||
em_start_locked(ifp, txr); | em_start_locked(ifp, txr); | ||||
#endif | #endif | ||||
E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); | E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); | ||||
EM_TX_UNLOCK(txr); | EM_TX_UNLOCK(txr); | ||||
} | } | ||||
Show All 13 Lines | em_handle_link(void *context, int pending) | ||||
callout_reset(&adapter->timer, hz, em_local_timer, adapter); | callout_reset(&adapter->timer, hz, em_local_timer, adapter); | ||||
E1000_WRITE_REG(&adapter->hw, E1000_IMS, | E1000_WRITE_REG(&adapter->hw, E1000_IMS, | ||||
EM_MSIX_LINK | E1000_IMS_LSC); | EM_MSIX_LINK | E1000_IMS_LSC); | ||||
if (adapter->link_active) { | if (adapter->link_active) { | ||||
for (int i = 0; i < adapter->num_queues; i++, txr++) { | for (int i = 0; i < adapter->num_queues; i++, txr++) { | ||||
EM_TX_LOCK(txr); | EM_TX_LOCK(txr); | ||||
#ifdef EM_MULTIQUEUE | #ifdef EM_MULTIQUEUE | ||||
if (!drbr_empty(ifp, txr->br)) | if (!drbr_empty(ifp, txr->br)) | ||||
em_mq_start_locked(ifp, txr, NULL); | em_mq_start_locked(ifp, txr); | ||||
#else | #else | ||||
if (if_sendq_empty(ifp)) | if (if_sendq_empty(ifp)) | ||||
em_start_locked(ifp, txr); | em_start_locked(ifp, txr); | ||||
#endif | #endif | ||||
EM_TX_UNLOCK(txr); | EM_TX_UNLOCK(txr); | ||||
} | } | ||||
} | } | ||||
EM_CORE_UNLOCK(adapter); | EM_CORE_UNLOCK(adapter); | ||||
▲ Show 20 Lines • Show All 527 Lines • ▼ Show 20 Lines | |||||
static void | static void | ||||
em_local_timer(void *arg) | em_local_timer(void *arg) | ||||
{ | { | ||||
struct adapter *adapter = arg; | struct adapter *adapter = arg; | ||||
if_t ifp = adapter->ifp; | if_t ifp = adapter->ifp; | ||||
struct tx_ring *txr = adapter->tx_rings; | struct tx_ring *txr = adapter->tx_rings; | ||||
struct rx_ring *rxr = adapter->rx_rings; | struct rx_ring *rxr = adapter->rx_rings; | ||||
u32 trigger; | u32 trigger = 0; | ||||
EM_CORE_LOCK_ASSERT(adapter); | EM_CORE_LOCK_ASSERT(adapter); | ||||
em_update_link_status(adapter); | em_update_link_status(adapter); | ||||
em_update_stats_counters(adapter); | em_update_stats_counters(adapter); | ||||
/* Reset LAA into RAR[0] on 82571 */ | /* Reset LAA into RAR[0] on 82571 */ | ||||
if ((adapter->hw.mac.type == e1000_82571) && | if ((adapter->hw.mac.type == e1000_82571) && | ||||
e1000_get_laa_state_82571(&adapter->hw)) | e1000_get_laa_state_82571(&adapter->hw)) | ||||
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); | e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); | ||||
/* Mask to use in the irq trigger */ | /* Mask to use in the irq trigger */ | ||||
if (adapter->msix_mem) | if (adapter->msix_mem) { | ||||
trigger = rxr->ims; | for (int i = 0; i < adapter->num_queues; i++, rxr++) | ||||
else | trigger |= rxr->ims; | ||||
rxr = adapter->rx_rings; | |||||
} else | |||||
trigger = E1000_ICS_RXDMT0; | trigger = E1000_ICS_RXDMT0; | ||||
/* | /* | ||||
** Check on the state of the TX queue(s), this | ** Check on the state of the TX queue(s), this | ||||
** can be done without the lock because its RO | ** can be done without the lock because its RO | ||||
** and the HUNG state will be static if set. | ** and the HUNG state will be static if set. | ||||
*/ | */ | ||||
for (int i = 0; i < adapter->num_queues; i++, txr++) { | for (int i = 0; i < adapter->num_queues; i++, txr++) { | ||||
/* Last cycle a queue was declared hung */ | |||||
if (txr->busy == EM_TX_HUNG) | if (txr->busy == EM_TX_HUNG) | ||||
goto hung; | goto hung; | ||||
if (txr->busy >= EM_TX_MAXTRIES) | if (txr->busy >= EM_TX_MAXTRIES) | ||||
txr->busy = EM_TX_HUNG; | txr->busy = EM_TX_HUNG; | ||||
/* Schedule a TX tasklet if needed */ | /* Schedule a TX tasklet if needed */ | ||||
if (txr->tx_avail <= EM_MAX_SCATTER) | if (txr->tx_avail <= EM_MAX_SCATTER) | ||||
taskqueue_enqueue(txr->tq, &txr->tx_task); | taskqueue_enqueue(txr->tq, &txr->tx_task); | ||||
} | } | ||||
callout_reset(&adapter->timer, hz, em_local_timer, adapter); | callout_reset(&adapter->timer, hz, em_local_timer, adapter); | ||||
#ifndef DEVICE_POLLING | #ifndef DEVICE_POLLING | ||||
/* Trigger an RX interrupt to guarantee mbuf refresh */ | /* Trigger an RX interrupt to guarantee mbuf refresh */ | ||||
E1000_WRITE_REG(&adapter->hw, E1000_ICS, trigger); | E1000_WRITE_REG(&adapter->hw, E1000_ICS, trigger); | ||||
#endif | #endif | ||||
return; | return; | ||||
hung: | hung: | ||||
/* Looks like we're hung */ | /* Looks like we're hung */ | ||||
device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); | device_printf(adapter->dev, "Watchdog timeout Queue[%d]-- resetting\n", | ||||
device_printf(adapter->dev, | txr->me); | ||||
"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, | em_print_debug_info(adapter); | ||||
E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)), | |||||
E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me))); | |||||
device_printf(adapter->dev,"TX(%d) desc avail = %d," | |||||
"Next TX to Clean = %d\n", | |||||
txr->me, txr->tx_avail, txr->next_to_clean); | |||||
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); | if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); | ||||
adapter->watchdog_events++; | adapter->watchdog_events++; | ||||
em_init_locked(adapter); | em_init_locked(adapter); | ||||
} | } | ||||
static void | static void | ||||
em_update_link_status(struct adapter *adapter) | em_update_link_status(struct adapter *adapter) | ||||
Show All 35 Lines | if (link_check && (adapter->link_active == 0)) { | ||||
e1000_get_speed_and_duplex(hw, &adapter->link_speed, | e1000_get_speed_and_duplex(hw, &adapter->link_speed, | ||||
&adapter->link_duplex); | &adapter->link_duplex); | ||||
/* Check if we must disable SPEED_MODE bit on PCI-E */ | /* Check if we must disable SPEED_MODE bit on PCI-E */ | ||||
if ((adapter->link_speed != SPEED_1000) && | if ((adapter->link_speed != SPEED_1000) && | ||||
((hw->mac.type == e1000_82571) || | ((hw->mac.type == e1000_82571) || | ||||
(hw->mac.type == e1000_82572))) { | (hw->mac.type == e1000_82572))) { | ||||
int tarc0; | int tarc0; | ||||
tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); | tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); | ||||
tarc0 &= ~SPEED_MODE_BIT; | tarc0 &= ~TARC_SPEED_MODE_BIT; | ||||
E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); | E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); | ||||
} | } | ||||
if (bootverbose) | if (bootverbose) | ||||
device_printf(dev, "Link is up %d Mbps %s\n", | device_printf(dev, "Link is up %d Mbps %s\n", | ||||
adapter->link_speed, | adapter->link_speed, | ||||
((adapter->link_duplex == FULL_DUPLEX) ? | ((adapter->link_duplex == FULL_DUPLEX) ? | ||||
"Full Duplex" : "Half Duplex")); | "Full Duplex" : "Half Duplex")); | ||||
adapter->link_active = 1; | adapter->link_active = 1; | ||||
▲ Show 20 Lines • Show All 99 Lines • ▼ Show 20 Lines | if (adapter->memory == NULL) { | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
adapter->osdep.mem_bus_space_tag = | adapter->osdep.mem_bus_space_tag = | ||||
rman_get_bustag(adapter->memory); | rman_get_bustag(adapter->memory); | ||||
adapter->osdep.mem_bus_space_handle = | adapter->osdep.mem_bus_space_handle = | ||||
rman_get_bushandle(adapter->memory); | rman_get_bushandle(adapter->memory); | ||||
adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; | adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; | ||||
/* Default to a single queue */ | |||||
adapter->num_queues = 1; | |||||
/* | |||||
* Setup MSI/X or MSI if PCI Express | |||||
*/ | |||||
adapter->msix = em_setup_msix(adapter); | |||||
adapter->hw.back = &adapter->osdep; | adapter->hw.back = &adapter->osdep; | ||||
return (0); | return (0); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Setup the Legacy or MSI Interrupt handler | * Setup the Legacy or MSI Interrupt handler | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | |||||
**********************************************************************/ | **********************************************************************/ | ||||
int | int | ||||
em_allocate_msix(struct adapter *adapter) | em_allocate_msix(struct adapter *adapter) | ||||
{ | { | ||||
device_t dev = adapter->dev; | device_t dev = adapter->dev; | ||||
struct tx_ring *txr = adapter->tx_rings; | struct tx_ring *txr = adapter->tx_rings; | ||||
struct rx_ring *rxr = adapter->rx_rings; | struct rx_ring *rxr = adapter->rx_rings; | ||||
int error, rid, vector = 0; | int error, rid, vector = 0; | ||||
int cpu_id = 0; | |||||
/* Make sure all interrupts are disabled */ | /* Make sure all interrupts are disabled */ | ||||
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); | E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); | ||||
/* First set up ring resources */ | /* First set up ring resources */ | ||||
for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) { | for (int i = 0; i < adapter->num_queues; i++, rxr++, vector++) { | ||||
/* RX ring */ | /* RX ring */ | ||||
rid = vector + 1; | rid = vector + 1; | ||||
rxr->res = bus_alloc_resource_any(dev, | rxr->res = bus_alloc_resource_any(dev, | ||||
SYS_RES_IRQ, &rid, RF_ACTIVE); | SYS_RES_IRQ, &rid, RF_ACTIVE); | ||||
if (rxr->res == NULL) { | if (rxr->res == NULL) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Unable to allocate bus resource: " | "Unable to allocate bus resource: " | ||||
"RX MSIX Interrupt %d\n", i); | "RX MSIX Interrupt %d\n", i); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
if ((error = bus_setup_intr(dev, rxr->res, | if ((error = bus_setup_intr(dev, rxr->res, | ||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, | INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, | ||||
rxr, &rxr->tag)) != 0) { | rxr, &rxr->tag)) != 0) { | ||||
device_printf(dev, "Failed to register RX handler"); | device_printf(dev, "Failed to register RX handler"); | ||||
return (error); | return (error); | ||||
} | } | ||||
#if __FreeBSD_version >= 800504 | #if __FreeBSD_version >= 800504 | ||||
bus_describe_intr(dev, rxr->res, rxr->tag, "rx %d", i); | bus_describe_intr(dev, rxr->res, rxr->tag, "rx%d", i); | ||||
#endif | #endif | ||||
rxr->msix = vector++; /* NOTE increment vector for TX */ | rxr->msix = vector; | ||||
if (em_last_bind_cpu < 0) | |||||
em_last_bind_cpu = CPU_FIRST(); | |||||
cpu_id = em_last_bind_cpu; | |||||
bus_bind_intr(dev, rxr->res, cpu_id); | |||||
TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr); | TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr); | ||||
rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT, | rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT, | ||||
taskqueue_thread_enqueue, &rxr->tq); | taskqueue_thread_enqueue, &rxr->tq); | ||||
taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq", | taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq (cpuid %d)", | ||||
device_get_nameunit(adapter->dev)); | device_get_nameunit(adapter->dev), cpu_id); | ||||
/* | /* | ||||
** Set the bit to enable interrupt | ** Set the bit to enable interrupt | ||||
** in E1000_IMS -- bits 20 and 21 | ** in E1000_IMS -- bits 20 and 21 | ||||
** are for RX0 and RX1, note this has | ** are for RX0 and RX1, note this has | ||||
** NOTHING to do with the MSIX vector | ** NOTHING to do with the MSIX vector | ||||
*/ | */ | ||||
rxr->ims = 1 << (20 + i); | rxr->ims = 1 << (20 + i); | ||||
adapter->ims |= rxr->ims; | |||||
adapter->ivars |= (8 | rxr->msix) << (i * 4); | adapter->ivars |= (8 | rxr->msix) << (i * 4); | ||||
em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu); | |||||
} | |||||
for (int i = 0; i < adapter->num_queues; i++, txr++, vector++) { | |||||
/* TX ring */ | /* TX ring */ | ||||
rid = vector + 1; | rid = vector + 1; | ||||
txr->res = bus_alloc_resource_any(dev, | txr->res = bus_alloc_resource_any(dev, | ||||
SYS_RES_IRQ, &rid, RF_ACTIVE); | SYS_RES_IRQ, &rid, RF_ACTIVE); | ||||
if (txr->res == NULL) { | if (txr->res == NULL) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Unable to allocate bus resource: " | "Unable to allocate bus resource: " | ||||
"TX MSIX Interrupt %d\n", i); | "TX MSIX Interrupt %d\n", i); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
if ((error = bus_setup_intr(dev, txr->res, | if ((error = bus_setup_intr(dev, txr->res, | ||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, | INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, | ||||
txr, &txr->tag)) != 0) { | txr, &txr->tag)) != 0) { | ||||
device_printf(dev, "Failed to register TX handler"); | device_printf(dev, "Failed to register TX handler"); | ||||
return (error); | return (error); | ||||
} | } | ||||
#if __FreeBSD_version >= 800504 | #if __FreeBSD_version >= 800504 | ||||
bus_describe_intr(dev, txr->res, txr->tag, "tx %d", i); | bus_describe_intr(dev, txr->res, txr->tag, "tx%d", i); | ||||
#endif | #endif | ||||
txr->msix = vector++; /* Increment vector for next pass */ | txr->msix = vector; | ||||
if (em_last_bind_cpu < 0) | |||||
em_last_bind_cpu = CPU_FIRST(); | |||||
cpu_id = em_last_bind_cpu; | |||||
bus_bind_intr(dev, txr->res, cpu_id); | |||||
TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr); | TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr); | ||||
txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT, | txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT, | ||||
taskqueue_thread_enqueue, &txr->tq); | taskqueue_thread_enqueue, &txr->tq); | ||||
taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq", | taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq (cpuid %d)", | ||||
device_get_nameunit(adapter->dev)); | device_get_nameunit(adapter->dev), cpu_id); | ||||
/* | /* | ||||
** Set the bit to enable interrupt | ** Set the bit to enable interrupt | ||||
** in E1000_IMS -- bits 22 and 23 | ** in E1000_IMS -- bits 22 and 23 | ||||
** are for TX0 and TX1, note this has | ** are for TX0 and TX1, note this has | ||||
** NOTHING to do with the MSIX vector | ** NOTHING to do with the MSIX vector | ||||
*/ | */ | ||||
txr->ims = 1 << (22 + i); | txr->ims = 1 << (22 + i); | ||||
adapter->ims |= txr->ims; | |||||
adapter->ivars |= (8 | txr->msix) << (8 + (i * 4)); | adapter->ivars |= (8 | txr->msix) << (8 + (i * 4)); | ||||
em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu); | |||||
} | } | ||||
/* Link interrupt */ | /* Link interrupt */ | ||||
++rid; | rid = vector + 1; | ||||
adapter->res = bus_alloc_resource_any(dev, | adapter->res = bus_alloc_resource_any(dev, | ||||
SYS_RES_IRQ, &rid, RF_ACTIVE); | SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); | ||||
if (!adapter->res) { | if (!adapter->res) { | ||||
device_printf(dev,"Unable to allocate " | device_printf(dev,"Unable to allocate " | ||||
"bus resource: Link interrupt [%d]\n", rid); | "bus resource: Link interrupt [%d]\n", rid); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
/* Set the link handler function */ | /* Set the link handler function */ | ||||
error = bus_setup_intr(dev, adapter->res, | error = bus_setup_intr(dev, adapter->res, | ||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, | INTR_TYPE_NET | INTR_MPSAFE, NULL, | ||||
em_msix_link, adapter, &adapter->tag); | em_msix_link, adapter, &adapter->tag); | ||||
if (error) { | if (error) { | ||||
adapter->res = NULL; | adapter->res = NULL; | ||||
device_printf(dev, "Failed to register LINK handler"); | device_printf(dev, "Failed to register LINK handler"); | ||||
return (error); | return (error); | ||||
} | } | ||||
#if __FreeBSD_version >= 800504 | #if __FreeBSD_version >= 800504 | ||||
bus_describe_intr(dev, adapter->res, adapter->tag, "link"); | bus_describe_intr(dev, adapter->res, adapter->tag, "link"); | ||||
#endif | #endif | ||||
adapter->linkvec = vector; | adapter->linkvec = vector; | ||||
adapter->ivars |= (8 | vector) << 16; | adapter->ivars |= (8 | vector) << 16; | ||||
adapter->ivars |= 0x80000000; | adapter->ivars |= 0x80000000; | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
em_free_pci_resources(struct adapter *adapter) | em_free_pci_resources(struct adapter *adapter) | ||||
{ | { | ||||
device_t dev = adapter->dev; | device_t dev = adapter->dev; | ||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
int rid; | int rid; | ||||
/* | /* | ||||
** Release all the queue interrupt resources: | ** Release all the queue interrupt resources: | ||||
*/ | */ | ||||
for (int i = 0; i < adapter->num_queues; i++) { | for (int i = 0; i < adapter->num_queues; i++) { | ||||
txr = &adapter->tx_rings[i]; | txr = &adapter->tx_rings[i]; | ||||
rxr = &adapter->rx_rings[i]; | |||||
/* an early abort? */ | /* an early abort? */ | ||||
if ((txr == NULL) || (rxr == NULL)) | if (txr == NULL) | ||||
break; | break; | ||||
rid = txr->msix +1; | rid = txr->msix +1; | ||||
if (txr->tag != NULL) { | if (txr->tag != NULL) { | ||||
bus_teardown_intr(dev, txr->res, txr->tag); | bus_teardown_intr(dev, txr->res, txr->tag); | ||||
txr->tag = NULL; | txr->tag = NULL; | ||||
} | } | ||||
if (txr->res != NULL) | if (txr->res != NULL) | ||||
bus_release_resource(dev, SYS_RES_IRQ, | bus_release_resource(dev, SYS_RES_IRQ, | ||||
rid, txr->res); | rid, txr->res); | ||||
rxr = &adapter->rx_rings[i]; | |||||
/* an early abort? */ | |||||
if (rxr == NULL) | |||||
break; | |||||
rid = rxr->msix +1; | rid = rxr->msix +1; | ||||
if (rxr->tag != NULL) { | if (rxr->tag != NULL) { | ||||
bus_teardown_intr(dev, rxr->res, rxr->tag); | bus_teardown_intr(dev, rxr->res, rxr->tag); | ||||
rxr->tag = NULL; | rxr->tag = NULL; | ||||
} | } | ||||
if (rxr->res != NULL) | if (rxr->res != NULL) | ||||
bus_release_resource(dev, SYS_RES_IRQ, | bus_release_resource(dev, SYS_RES_IRQ, | ||||
rid, rxr->res); | rid, rxr->res); | ||||
Show All 33 Lines | |||||
* Setup MSI or MSI/X | * Setup MSI or MSI/X | ||||
*/ | */ | ||||
static int | static int | ||||
em_setup_msix(struct adapter *adapter) | em_setup_msix(struct adapter *adapter) | ||||
{ | { | ||||
device_t dev = adapter->dev; | device_t dev = adapter->dev; | ||||
int val; | int val; | ||||
/* Nearly always going to use one queue */ | |||||
adapter->num_queues = 1; | |||||
/* | /* | ||||
** Setup MSI/X for Hartwell: tests have shown | ** Try using MSI-X for Hartwell adapters | ||||
** use of two queues to be unstable, and to | |||||
** provide no great gain anyway, so we simply | |||||
** seperate the interrupts and use a single queue. | |||||
*/ | */ | ||||
if ((adapter->hw.mac.type == e1000_82574) && | if ((adapter->hw.mac.type == e1000_82574) && | ||||
(em_enable_msix == TRUE)) { | (em_enable_msix == TRUE)) { | ||||
#ifdef EM_MULTIQUEUE | |||||
adapter->num_queues = (em_num_queues == 1) ? 1 : 2; | |||||
if (adapter->num_queues > 1) | |||||
em_enable_vectors_82574(adapter); | |||||
#endif | |||||
/* Map the MSIX BAR */ | /* Map the MSIX BAR */ | ||||
int rid = PCIR_BAR(EM_MSIX_BAR); | int rid = PCIR_BAR(EM_MSIX_BAR); | ||||
adapter->msix_mem = bus_alloc_resource_any(dev, | adapter->msix_mem = bus_alloc_resource_any(dev, | ||||
SYS_RES_MEMORY, &rid, RF_ACTIVE); | SYS_RES_MEMORY, &rid, RF_ACTIVE); | ||||
if (adapter->msix_mem == NULL) { | if (adapter->msix_mem == NULL) { | ||||
/* May not be enabled */ | /* May not be enabled */ | ||||
device_printf(adapter->dev, | device_printf(adapter->dev, | ||||
"Unable to map MSIX table \n"); | "Unable to map MSIX table \n"); | ||||
goto msi; | goto msi; | ||||
} | } | ||||
val = pci_msix_count(dev); | val = pci_msix_count(dev); | ||||
/* We only need/want 3 vectors */ | |||||
#ifdef EM_MULTIQUEUE | |||||
/* We need 5 vectors in the multiqueue case */ | |||||
if (adapter->num_queues > 1 ) { | |||||
if (val >= 5) | |||||
val = 5; | |||||
else { | |||||
adapter->num_queues = 1; | |||||
device_printf(adapter->dev, | |||||
"Insufficient MSIX vectors for >1 queue, " | |||||
"using single queue...\n"); | |||||
goto msix_one; | |||||
} | |||||
} else { | |||||
msix_one: | |||||
#endif | |||||
if (val >= 3) | if (val >= 3) | ||||
val = 3; | val = 3; | ||||
else { | else { | ||||
device_printf(adapter->dev, | device_printf(adapter->dev, | ||||
"MSIX: insufficient vectors, using MSI\n"); | "Insufficient MSIX vectors, using MSI\n"); | ||||
goto msi; | goto msi; | ||||
} | } | ||||
#ifdef EM_MULTIQUEUE | |||||
} | |||||
#endif | |||||
if ((pci_alloc_msix(dev, &val) == 0) && (val == 3)) { | if ((pci_alloc_msix(dev, &val) == 0)) { | ||||
device_printf(adapter->dev, | device_printf(adapter->dev, | ||||
"Using MSIX interrupts " | "Using MSIX interrupts " | ||||
"with %d vectors\n", val); | "with %d vectors\n", val); | ||||
return (val); | return (val); | ||||
} | } | ||||
/* | /* | ||||
** If MSIX alloc failed or provided us with | ** If MSIX alloc failed or provided us with | ||||
** less than needed, free and fall through to MSI | ** less than needed, free and fall through to MSI | ||||
*/ | */ | ||||
pci_release_msi(dev); | pci_release_msi(dev); | ||||
} | } | ||||
msi: | msi: | ||||
if (adapter->msix_mem != NULL) { | if (adapter->msix_mem != NULL) { | ||||
bus_release_resource(dev, SYS_RES_MEMORY, | bus_release_resource(dev, SYS_RES_MEMORY, | ||||
PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem); | PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem); | ||||
adapter->msix_mem = NULL; | adapter->msix_mem = NULL; | ||||
} | } | ||||
val = 1; | val = 1; | ||||
if (pci_alloc_msi(dev, &val) == 0) { | if (pci_alloc_msi(dev, &val) == 0) { | ||||
device_printf(adapter->dev,"Using an MSI interrupt\n"); | device_printf(adapter->dev, "Using an MSI interrupt\n"); | ||||
return (val); | return (val); | ||||
} | } | ||||
/* Should only happen due to manual configuration */ | /* Should only happen due to manual configuration */ | ||||
device_printf(adapter->dev,"No MSI/MSIX using a Legacy IRQ\n"); | device_printf(adapter->dev,"No MSI/MSIX using a Legacy IRQ\n"); | ||||
return (0); | return (0); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 628 Lines • ▼ Show 20 Lines | |||||
* Enable transmit unit. | * Enable transmit unit. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | static void | ||||
em_initialize_transmit_unit(struct adapter *adapter) | em_initialize_transmit_unit(struct adapter *adapter) | ||||
{ | { | ||||
struct tx_ring *txr = adapter->tx_rings; | struct tx_ring *txr = adapter->tx_rings; | ||||
struct e1000_hw *hw = &adapter->hw; | struct e1000_hw *hw = &adapter->hw; | ||||
u32 tctl, tarc, tipg = 0; | u32 tctl, txdctl = 0, tarc, tipg = 0; | ||||
INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); | INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); | ||||
for (int i = 0; i < adapter->num_queues; i++, txr++) { | for (int i = 0; i < adapter->num_queues; i++, txr++) { | ||||
u64 bus_addr = txr->txdma.dma_paddr; | u64 bus_addr = txr->txdma.dma_paddr; | ||||
/* Base and Len of TX Ring */ | /* Base and Len of TX Ring */ | ||||
E1000_WRITE_REG(hw, E1000_TDLEN(i), | E1000_WRITE_REG(hw, E1000_TDLEN(i), | ||||
adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); | adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); | ||||
E1000_WRITE_REG(hw, E1000_TDBAH(i), | E1000_WRITE_REG(hw, E1000_TDBAH(i), | ||||
(u32)(bus_addr >> 32)); | (u32)(bus_addr >> 32)); | ||||
E1000_WRITE_REG(hw, E1000_TDBAL(i), | E1000_WRITE_REG(hw, E1000_TDBAL(i), | ||||
(u32)bus_addr); | (u32)bus_addr); | ||||
/* Init the HEAD/TAIL indices */ | /* Init the HEAD/TAIL indices */ | ||||
E1000_WRITE_REG(hw, E1000_TDT(i), 0); | E1000_WRITE_REG(hw, E1000_TDT(i), 0); | ||||
E1000_WRITE_REG(hw, E1000_TDH(i), 0); | E1000_WRITE_REG(hw, E1000_TDH(i), 0); | ||||
HW_DEBUGOUT2("Base = %x, Length = %x\n", | HW_DEBUGOUT2("Base = %x, Length = %x\n", | ||||
E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)), | E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)), | ||||
E1000_READ_REG(&adapter->hw, E1000_TDLEN(i))); | E1000_READ_REG(&adapter->hw, E1000_TDLEN(i))); | ||||
txr->busy = EM_TX_IDLE; | txr->busy = EM_TX_IDLE; | ||||
txdctl = 0; /* clear txdctl */ | |||||
txdctl |= 0x1f; /* PTHRESH */ | |||||
txdctl |= 1 << 8; /* HTHRESH */ | |||||
txdctl |= 1 << 16;/* WTHRESH */ | |||||
txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ | |||||
txdctl |= E1000_TXDCTL_GRAN; | |||||
txdctl |= 1 << 25; /* LWTHRESH */ | |||||
E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); | |||||
} | } | ||||
/* Set the default values for the Tx Inter Packet Gap timer */ | /* Set the default values for the Tx Inter Packet Gap timer */ | ||||
switch (adapter->hw.mac.type) { | switch (adapter->hw.mac.type) { | ||||
case e1000_80003es2lan: | case e1000_80003es2lan: | ||||
tipg = DEFAULT_82543_TIPG_IPGR1; | tipg = DEFAULT_82543_TIPG_IPGR1; | ||||
tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << | ||||
E1000_TIPG_IPGR2_SHIFT; | E1000_TIPG_IPGR2_SHIFT; | ||||
Show All 14 Lines | em_initialize_transmit_unit(struct adapter *adapter) | ||||
if(adapter->hw.mac.type >= e1000_82540) | if(adapter->hw.mac.type >= e1000_82540) | ||||
E1000_WRITE_REG(&adapter->hw, E1000_TADV, | E1000_WRITE_REG(&adapter->hw, E1000_TADV, | ||||
adapter->tx_abs_int_delay.value); | adapter->tx_abs_int_delay.value); | ||||
if ((adapter->hw.mac.type == e1000_82571) || | if ((adapter->hw.mac.type == e1000_82571) || | ||||
(adapter->hw.mac.type == e1000_82572)) { | (adapter->hw.mac.type == e1000_82572)) { | ||||
tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); | tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); | ||||
tarc |= SPEED_MODE_BIT; | tarc |= TARC_SPEED_MODE_BIT; | ||||
E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); | E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); | ||||
} else if (adapter->hw.mac.type == e1000_80003es2lan) { | } else if (adapter->hw.mac.type == e1000_80003es2lan) { | ||||
/* errata: program both queues to unweighted RR */ | |||||
tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); | tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); | ||||
tarc |= 1; | tarc |= 1; | ||||
E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); | E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); | ||||
tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); | tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); | ||||
tarc |= 1; | tarc |= 1; | ||||
E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); | E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); | ||||
} else if (adapter->hw.mac.type == e1000_82574) { | |||||
tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); | |||||
tarc |= TARC_ERRATA_BIT; | |||||
if ( adapter->num_queues > 1) { | |||||
tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX); | |||||
E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); | |||||
E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); | |||||
} else | |||||
E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); | |||||
} | } | ||||
adapter->txd_cmd = E1000_TXD_CMD_IFCS; | adapter->txd_cmd = E1000_TXD_CMD_IFCS; | ||||
if (adapter->tx_int_delay.value > 0) | if (adapter->tx_int_delay.value > 0) | ||||
adapter->txd_cmd |= E1000_TXD_CMD_IDE; | adapter->txd_cmd |= E1000_TXD_CMD_IDE; | ||||
/* Program the Transmit Control Register */ | /* Program the Transmit Control Register */ | ||||
tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); | tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); | ||||
▲ Show 20 Lines • Show All 414 Lines • ▼ Show 20 Lines | txr->busy = EM_TX_BUSY; /* note this clears HUNG */ | ||||
/* | /* | ||||
* If we have a minimum free, clear IFF_DRV_OACTIVE | * If we have a minimum free, clear IFF_DRV_OACTIVE | ||||
* to tell the stack that it is OK to send packets. | * to tell the stack that it is OK to send packets. | ||||
* Notice that all writes of OACTIVE happen under the | * Notice that all writes of OACTIVE happen under the | ||||
* TX lock which, with a single queue, guarantees | * TX lock which, with a single queue, guarantees | ||||
* sanity. | * sanity. | ||||
*/ | */ | ||||
if (txr->tx_avail >= EM_MAX_SCATTER) | if (txr->tx_avail >= EM_MAX_SCATTER) { | ||||
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); | if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); | ||||
} | |||||
/* Disable hang detection if all clean */ | /* Disable hang detection if all clean */ | ||||
if (txr->tx_avail == adapter->num_tx_desc) | if (txr->tx_avail == adapter->num_tx_desc) | ||||
txr->busy = EM_TX_IDLE; | txr->busy = EM_TX_IDLE; | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
▲ Show 20 Lines • Show All 355 Lines • ▼ Show 20 Lines | em_initialize_receive_unit(struct adapter *adapter) | ||||
*/ | */ | ||||
rctl = E1000_READ_REG(hw, E1000_RCTL); | rctl = E1000_READ_REG(hw, E1000_RCTL); | ||||
/* Do not disable if ever enabled on this hardware */ | /* Do not disable if ever enabled on this hardware */ | ||||
if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583)) | if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583)) | ||||
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); | E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); | ||||
E1000_WRITE_REG(&adapter->hw, E1000_RADV, | E1000_WRITE_REG(&adapter->hw, E1000_RADV, | ||||
adapter->rx_abs_int_delay.value); | adapter->rx_abs_int_delay.value); | ||||
E1000_WRITE_REG(&adapter->hw, E1000_RDTR, | |||||
adapter->rx_int_delay.value); | |||||
/* | /* | ||||
* Set the interrupt throttling rate. Value is calculated | * Set the interrupt throttling rate. Value is calculated | ||||
* as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) | * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) | ||||
*/ | */ | ||||
E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR); | E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR); | ||||
/* | /* | ||||
** When using MSIX interrupts we need to throttle | ** When using MSIX interrupts we need to throttle | ||||
** using the EITR register (82574 only) | ** using the EITR register (82574 only) | ||||
*/ | */ | ||||
if (hw->mac.type == e1000_82574) { | if (hw->mac.type == e1000_82574) { | ||||
u32 rfctl; | |||||
for (int i = 0; i < 4; i++) | for (int i = 0; i < 4; i++) | ||||
E1000_WRITE_REG(hw, E1000_EITR_82574(i), | E1000_WRITE_REG(hw, E1000_EITR_82574(i), | ||||
DEFAULT_ITR); | DEFAULT_ITR); | ||||
/* Disable accelerated acknowledge */ | /* Disable accelerated acknowledge */ | ||||
E1000_WRITE_REG(hw, E1000_RFCTL, E1000_RFCTL_ACK_DIS); | rfctl = E1000_READ_REG(hw, E1000_RFCTL); | ||||
rfctl |= E1000_RFCTL_ACK_DIS; | |||||
E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); | |||||
} | } | ||||
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); | rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); | ||||
if (if_getcapenable(ifp) & IFCAP_RXCSUM) | if (if_getcapenable(ifp) & IFCAP_RXCSUM) { | ||||
#ifdef EM_MULTIQUEUE | |||||
rxcsum |= E1000_RXCSUM_TUOFL | | |||||
E1000_RXCSUM_IPOFL | | |||||
E1000_RXCSUM_PCSD; | |||||
#else | |||||
rxcsum |= E1000_RXCSUM_TUOFL; | rxcsum |= E1000_RXCSUM_TUOFL; | ||||
else | #endif | ||||
} else | |||||
rxcsum &= ~E1000_RXCSUM_TUOFL; | rxcsum &= ~E1000_RXCSUM_TUOFL; | ||||
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); | E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); | ||||
#ifdef EM_MULTIQUEUE | |||||
if (adapter->num_queues > 1) { | |||||
uint32_t rss_key[10]; | |||||
uint32_t reta; | |||||
int i; | |||||
/* | /* | ||||
* Configure RSS key | |||||
*/ | |||||
arc4rand(rss_key, sizeof(rss_key), 0); | |||||
for (i = 0; i < 10; ++i) | |||||
E1000_WRITE_REG_ARRAY(hw,E1000_RSSRK(0), i, rss_key[i]); | |||||
/* | |||||
* Configure RSS redirect table in following fashion: | |||||
* (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] | |||||
*/ | |||||
reta = 0; | |||||
for (i = 0; i < 4; ++i) { | |||||
uint32_t q; | |||||
q = (i % adapter->num_queues) << 7; | |||||
reta |= q << (8 * i); | |||||
} | |||||
for (i = 0; i < 32; ++i) | |||||
E1000_WRITE_REG(hw, E1000_RETA(i), reta); | |||||
E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q | | |||||
E1000_MRQC_RSS_FIELD_IPV4_TCP | | |||||
E1000_MRQC_RSS_FIELD_IPV4 | | |||||
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX | | |||||
E1000_MRQC_RSS_FIELD_IPV6_EX | | |||||
E1000_MRQC_RSS_FIELD_IPV6 | | |||||
E1000_MRQC_RSS_FIELD_IPV6_TCP); | |||||
} | |||||
#endif | |||||
/* | |||||
** XXX TEMPORARY WORKAROUND: on some systems with 82573 | ** XXX TEMPORARY WORKAROUND: on some systems with 82573 | ||||
** long latencies are observed, like Lenovo X60. This | ** long latencies are observed, like Lenovo X60. This | ||||
** change eliminates the problem, but since having positive | ** change eliminates the problem, but since having positive | ||||
** values in RDTR is a known source of problems on other | ** values in RDTR is a known source of problems on other | ||||
** platforms another solution is being sought. | ** platforms another solution is being sought. | ||||
*/ | */ | ||||
if (hw->mac.type == e1000_82573) | if (hw->mac.type == e1000_82573) | ||||
E1000_WRITE_REG(hw, E1000_RDTR, 0x20); | E1000_WRITE_REG(hw, E1000_RDTR, 0x20); | ||||
Show All 17 Lines | #ifdef DEV_NETMAP | ||||
if (if_getcapenable(ifp) & IFCAP_NETMAP) { | if (if_getcapenable(ifp) & IFCAP_NETMAP) { | ||||
struct netmap_adapter *na = netmap_getna(adapter->ifp); | struct netmap_adapter *na = netmap_getna(adapter->ifp); | ||||
rdt -= nm_kr_rxspace(&na->rx_rings[i]); | rdt -= nm_kr_rxspace(&na->rx_rings[i]); | ||||
} | } | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
E1000_WRITE_REG(hw, E1000_RDT(i), rdt); | E1000_WRITE_REG(hw, E1000_RDT(i), rdt); | ||||
} | } | ||||
/* Set PTHRESH for improved jumbo performance */ | /* | ||||
* Set PTHRESH for improved jumbo performance | |||||
* According to 10.2.5.11 of Intel 82574 Datasheet, | |||||
* RXDCTL(1) is written whenever RXDCTL(0) is written. | |||||
* Only write to RXDCTL(1) if there is a need for different | |||||
* settings. | |||||
*/ | |||||
if (((adapter->hw.mac.type == e1000_ich9lan) || | if (((adapter->hw.mac.type == e1000_ich9lan) || | ||||
(adapter->hw.mac.type == e1000_pch2lan) || | (adapter->hw.mac.type == e1000_pch2lan) || | ||||
(adapter->hw.mac.type == e1000_ich10lan)) && | (adapter->hw.mac.type == e1000_ich10lan)) && | ||||
(if_getmtu(ifp) > ETHERMTU)) { | (if_getmtu(ifp) > ETHERMTU)) { | ||||
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); | u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); | ||||
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); | E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); | ||||
} else if ((adapter->hw.mac.type == e1000_82574) && | |||||
(if_getmtu(ifp) > ETHERMTU)) { | |||||
for (int i = 0; i < adapter->num_queues; i++) { | |||||
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); | |||||
rxdctl |= 0x20; /* PTHRESH */ | |||||
rxdctl |= 4 << 8; /* HTHRESH */ | |||||
rxdctl |= 4 << 16;/* WTHRESH */ | |||||
rxdctl |= 1 << 24; /* Switch to granularity */ | |||||
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); | |||||
} | } | ||||
} | |||||
if (adapter->hw.mac.type >= e1000_pch2lan) { | if (adapter->hw.mac.type >= e1000_pch2lan) { | ||||
if (if_getmtu(ifp) > ETHERMTU) | if (if_getmtu(ifp) > ETHERMTU) | ||||
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); | e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); | ||||
else | else | ||||
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); | e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | em_rxeof(struct rx_ring *rxr, int count, int *done) | ||||
u8 status = 0; | u8 status = 0; | ||||
u16 len; | u16 len; | ||||
int i, processed, rxdone = 0; | int i, processed, rxdone = 0; | ||||
bool eop; | bool eop; | ||||
struct e1000_rx_desc *cur; | struct e1000_rx_desc *cur; | ||||
EM_RX_LOCK(rxr); | EM_RX_LOCK(rxr); | ||||
/* Sync the ring */ | |||||
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | |||||
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); | |||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
if (netmap_rx_irq(ifp, rxr->me, &processed)) { | if (netmap_rx_irq(ifp, rxr->me, &processed)) { | ||||
EM_RX_UNLOCK(rxr); | EM_RX_UNLOCK(rxr); | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
for (i = rxr->next_to_check, processed = 0; count != 0;) { | for (i = rxr->next_to_check, processed = 0; count != 0;) { | ||||
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) | if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) | ||||
break; | break; | ||||
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | |||||
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); | |||||
cur = &rxr->rx_base[i]; | cur = &rxr->rx_base[i]; | ||||
status = cur->status; | status = cur->status; | ||||
mp = sendmp = NULL; | mp = sendmp = NULL; | ||||
if ((status & E1000_RXD_STAT_DD) == 0) | if ((status & E1000_RXD_STAT_DD) == 0) | ||||
break; | break; | ||||
len = le16toh(cur->length); | len = le16toh(cur->length); | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | #endif | ||||
sendmp->m_flags |= M_VLANTAG; | sendmp->m_flags |= M_VLANTAG; | ||||
} | } | ||||
#ifndef __NO_STRICT_ALIGNMENT | #ifndef __NO_STRICT_ALIGNMENT | ||||
skip: | skip: | ||||
#endif | #endif | ||||
rxr->fmp = rxr->lmp = NULL; | rxr->fmp = rxr->lmp = NULL; | ||||
} | } | ||||
next_desc: | next_desc: | ||||
/* Sync the ring */ | |||||
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | |||||
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); | |||||
/* Zero out the receive descriptors status. */ | /* Zero out the receive descriptors status. */ | ||||
cur->status = 0; | cur->status = 0; | ||||
++rxdone; /* cumulative for POLL */ | ++rxdone; /* cumulative for POLL */ | ||||
++processed; | ++processed; | ||||
/* Advance our pointers to the next descriptor. */ | /* Advance our pointers to the next descriptor. */ | ||||
if (++i == adapter->num_rx_desc) | if (++i == adapter->num_rx_desc) | ||||
i = 0; | i = 0; | ||||
▲ Show 20 Lines • Show All 805 Lines • ▼ Show 20 Lines | SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", | ||||
"Receiver Control Register"); | "Receiver Control Register"); | ||||
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", | ||||
CTLFLAG_RD, &adapter->hw.fc.high_water, 0, | CTLFLAG_RD, &adapter->hw.fc.high_water, 0, | ||||
"Flow Control High Watermark"); | "Flow Control High Watermark"); | ||||
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", | ||||
CTLFLAG_RD, &adapter->hw.fc.low_water, 0, | CTLFLAG_RD, &adapter->hw.fc.low_water, 0, | ||||
"Flow Control Low Watermark"); | "Flow Control Low Watermark"); | ||||
for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { | for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) { | ||||
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); | snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i); | ||||
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, | queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, | ||||
CTLFLAG_RD, NULL, "Queue Name"); | CTLFLAG_RD, NULL, "TX Queue Name"); | ||||
queue_list = SYSCTL_CHILDREN(queue_node); | queue_list = SYSCTL_CHILDREN(queue_node); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", | ||||
CTLTYPE_UINT | CTLFLAG_RD, adapter, | CTLTYPE_UINT | CTLFLAG_RD, adapter, | ||||
E1000_TDH(txr->me), | E1000_TDH(txr->me), | ||||
em_sysctl_reg_handler, "IU", | em_sysctl_reg_handler, "IU", | ||||
"Transmit Descriptor Head"); | "Transmit Descriptor Head"); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", | ||||
CTLTYPE_UINT | CTLFLAG_RD, adapter, | CTLTYPE_UINT | CTLFLAG_RD, adapter, | ||||
E1000_TDT(txr->me), | E1000_TDT(txr->me), | ||||
em_sysctl_reg_handler, "IU", | em_sysctl_reg_handler, "IU", | ||||
"Transmit Descriptor Tail"); | "Transmit Descriptor Tail"); | ||||
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", | SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", | ||||
CTLFLAG_RD, &txr->tx_irq, | CTLFLAG_RD, &txr->tx_irq, | ||||
"Queue MSI-X Transmit Interrupts"); | "Queue MSI-X Transmit Interrupts"); | ||||
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail", | SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail", | ||||
CTLFLAG_RD, &txr->no_desc_avail, | CTLFLAG_RD, &txr->no_desc_avail, | ||||
"Queue No Descriptor Available"); | "Queue No Descriptor Available"); | ||||
snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", i); | |||||
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, | |||||
CTLFLAG_RD, NULL, "RX Queue Name"); | |||||
queue_list = SYSCTL_CHILDREN(queue_node); | |||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", | ||||
CTLTYPE_UINT | CTLFLAG_RD, adapter, | CTLTYPE_UINT | CTLFLAG_RD, adapter, | ||||
E1000_RDH(rxr->me), | E1000_RDH(rxr->me), | ||||
em_sysctl_reg_handler, "IU", | em_sysctl_reg_handler, "IU", | ||||
"Receive Descriptor Head"); | "Receive Descriptor Head"); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", | ||||
CTLTYPE_UINT | CTLFLAG_RD, adapter, | CTLTYPE_UINT | CTLFLAG_RD, adapter, | ||||
E1000_RDT(rxr->me), | E1000_RDT(rxr->me), | ||||
▲ Show 20 Lines • Show All 417 Lines • ▼ Show 20 Lines | em_print_debug_info(struct adapter *adapter) | ||||
else | else | ||||
printf("Interface is NOT RUNNING\n"); | printf("Interface is NOT RUNNING\n"); | ||||
if (if_getdrvflags(adapter->ifp) & IFF_DRV_OACTIVE) | if (if_getdrvflags(adapter->ifp) & IFF_DRV_OACTIVE) | ||||
printf("and INACTIVE\n"); | printf("and INACTIVE\n"); | ||||
else | else | ||||
printf("and ACTIVE\n"); | printf("and ACTIVE\n"); | ||||
for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) { | |||||
device_printf(dev, "TX Queue %d ------\n", i); | |||||
device_printf(dev, "hw tdh = %d, hw tdt = %d\n", | device_printf(dev, "hw tdh = %d, hw tdt = %d\n", | ||||
E1000_READ_REG(&adapter->hw, E1000_TDH(0)), | E1000_READ_REG(&adapter->hw, E1000_TDH(i)), | ||||
E1000_READ_REG(&adapter->hw, E1000_TDT(0))); | E1000_READ_REG(&adapter->hw, E1000_TDT(i))); | ||||
device_printf(dev, "hw rdh = %d, hw rdt = %d\n", | |||||
E1000_READ_REG(&adapter->hw, E1000_RDH(0)), | |||||
E1000_READ_REG(&adapter->hw, E1000_RDT(0))); | |||||
device_printf(dev, "Tx Queue Status = %d\n", txr->busy); | device_printf(dev, "Tx Queue Status = %d\n", txr->busy); | ||||
device_printf(dev, "TX descriptors avail = %d\n", | device_printf(dev, "TX descriptors avail = %d\n", | ||||
txr->tx_avail); | txr->tx_avail); | ||||
device_printf(dev, "Tx Descriptors avail failure = %ld\n", | device_printf(dev, "Tx Descriptors avail failure = %ld\n", | ||||
txr->no_desc_avail); | txr->no_desc_avail); | ||||
device_printf(dev, "RX Queue %d ------\n", i); | |||||
device_printf(dev, "hw rdh = %d, hw rdt = %d\n", | |||||
E1000_READ_REG(&adapter->hw, E1000_RDH(i)), | |||||
E1000_READ_REG(&adapter->hw, E1000_RDT(i))); | |||||
device_printf(dev, "RX discarded packets = %ld\n", | device_printf(dev, "RX discarded packets = %ld\n", | ||||
rxr->rx_discarded); | rxr->rx_discarded); | ||||
device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check); | device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check); | ||||
device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh); | device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh); | ||||
} | } | ||||
} | |||||
#ifdef EM_MULTIQUEUE
/*
 * 82574 only:
 * Write a new value to the EEPROM increasing the number of MSIX
 * vectors from 3 to 5, for proper multiqueue support.
 *
 * The change is persistent (written back to the NVM with an updated
 * checksum) and only performed when the currently-reported vector
 * count differs from the desired value.
 */
static void
em_enable_vectors_82574(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u16 edata = 0;	/* zero-init: left untouched if the NVM read fails */

	e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
	/* N field encodes (vectors - 1); 4 means 5 MSIX vectors. */
	if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) {
		device_printf(dev, "Writing to eeprom: increasing "
		    "reported MSIX vectors from 3 to 5...\n");
		edata &= ~(EM_NVM_MSIX_N_MASK);
		edata |= 4 << EM_NVM_MSIX_N_SHIFT;
		e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
		e1000_update_nvm_checksum(hw);
		device_printf(dev, "Writing to eeprom: done\n");
	}
}
#endif
#ifdef DDB | |||||
DB_COMMAND(em_reset_dev, em_ddb_reset_dev) | |||||
{ | |||||
devclass_t dc; | |||||
int max_em; | |||||
dc = devclass_find("em"); | |||||
max_em = devclass_get_maxunit(dc); | |||||
for (int index = 0; index < (max_em - 1); index++) { | |||||
device_t dev; | |||||
dev = devclass_get_device(dc, index); | |||||
if (device_get_driver(dev) == &em_driver) { | |||||
struct adapter *adapter = device_get_softc(dev); | |||||
em_init_locked(adapter); | |||||
} | |||||
} | |||||
} | |||||
DB_COMMAND(em_dump_queue, em_ddb_dump_queue) | |||||
{ | |||||
devclass_t dc; | |||||
int max_em; | |||||
dc = devclass_find("em"); | |||||
max_em = devclass_get_maxunit(dc); | |||||
for (int index = 0; index < (max_em - 1); index++) { | |||||
device_t dev; | |||||
dev = devclass_get_device(dc, index); | |||||
if (device_get_driver(dev) == &em_driver) | |||||
em_print_debug_info(device_get_softc(dev)); | |||||
} | |||||
} | |||||
#endif |