Index: sys/dev/e1000/if_em.h
===================================================================
--- sys/dev/e1000/if_em.h
+++ sys/dev/e1000/if_em.h
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2015, Intel Corporation
+  Copyright (c) 2001-2011, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
@@ -286,14 +286,6 @@
  * solve it just using this define.
  */
 #define EM_EIAC 0x000DC
-/*
- * 82574 only reports 3 MSI-X vectors by default;
- * defines assisting with making it report 5 are
- * located here.
- */
-#define EM_NVM_PCIE_CTRL	0x1B
-#define EM_NVM_MSIX_N_MASK	(0x7 << EM_NVM_MSIX_N_SHIFT)
-#define EM_NVM_MSIX_N_SHIFT	7
 
 /*
  * Bus dma allocation structure used by
@@ -317,6 +309,24 @@
 };
 
 /*
+** Driver queue struct: this is the interrupt container
+** for the associated tx and rx ring.
+*/
+struct em_queue {
+	struct adapter	*adapter;
+	u32		msix;		/* This queue's MSIX vector */
+	u32		ims;		/* This queue's EIMS bit */
+	u32		eitr_setting;
+	struct resource	*res;
+	void		*tag;
+	struct tx_ring	*txr;
+	struct rx_ring	*rxr;
+	struct task	que_task;
+	struct taskqueue *tq;
+	u64		irqs;
+};
+
+/*
  * The transmit ring, one per tx queue
  */
 struct tx_ring {
@@ -324,16 +334,12 @@
 	struct mtx	tx_mtx;
 	char		mtx_name[16];
 	u32		me;
-	u32		msix;
-	u32		ims;
-	int		busy;
+	int		busy;
 	struct em_dma_alloc	txdma;
 	struct e1000_tx_desc	*tx_base;
-	struct task	tx_task;
-	struct taskqueue *tq;
 	u32		next_avail_desc;
 	u32		next_to_clean;
-	struct em_txbuffer	*tx_buffers;
+	struct em_buffer	*tx_buffers;
 	volatile u16	tx_avail;
 	u32		tx_tso;		/* last tx was tso */
 	u16		last_hw_offload;
@@ -346,9 +352,6 @@
 #endif
 	/* Interrupt resources */
 	bus_dma_tag_t	txtag;
-	void		*tag;
-	struct resource	*res;
-	unsigned long	tx_irq;
 	unsigned long	no_desc_avail;
 };
 
@@ -358,29 +361,22 @@
 struct rx_ring {
 	struct adapter	*adapter;
 	u32		me;
-	u32		msix;
-	u32		ims;
 	struct mtx	rx_mtx;
 	char		mtx_name[16];
 	u32		payload;
-	struct task	rx_task;
-	struct taskqueue *tq;
 	union e1000_rx_desc_extended	*rx_base;
 	struct em_dma_alloc	rxdma;
 	u32		next_to_refresh;
 	u32		next_to_check;
-	struct em_rxbuffer	*rx_buffers;
+	struct em_buffer	*rx_buffers;
 	struct mbuf	*fmp;
 	struct mbuf	*lmp;
 
 	/* Interrupt resources */
-	void		*tag;
-	struct resource	*res;
 	bus_dma_tag_t	rxtag;
 	bool		discard;
 
 	/* Soft stats */
-	unsigned long	rx_irq;
 	unsigned long	rx_discarded;
 	unsigned long	rx_packets;
 	unsigned long	rx_bytes;
@@ -414,7 +410,6 @@
 	int		min_frame_size;
 	struct mtx	core_mtx;
 	int		em_insert_vlan_header;
-	u32		ims;
 	bool		in_detach;
 
 	/* Task for FAST handling */
@@ -428,6 +423,8 @@
 	u16		num_vlans;
 	u8		num_queues;
 
+	/* Interface queues */
+	struct em_queue	*queues;
 	/*
 	 * Transmit rings:
 	 *	Allocated at run time, an array of rings.
@@ -445,6 +442,8 @@
 	u32		rx_process_limit;
 	u32		rx_mbuf_sz;
 
+	u64		que_mask;
+
 	/* Management and WOL features */
 	u32		wol;
 	bool		has_manage;
@@ -501,13 +500,7 @@
 	unsigned int	index;
 } em_vendor_info_t;
 
-struct em_txbuffer {
-	int		next_eop;	/* Index of the desc to watch */
-	struct mbuf	*m_head;
-	bus_dmamap_t	map;		/* bus_dma map for packet */
-};
-
-struct em_rxbuffer {
+struct em_buffer {
 	int		next_eop;	/* Index of the desc to watch */
 	struct mbuf	*m_head;
 	bus_dmamap_t	map;		/* bus_dma map for packet */
Index: sys/dev/e1000/if_em.c
===================================================================
--- sys/dev/e1000/if_em.c
+++ sys/dev/e1000/if_em.c
@@ -96,6 +96,11 @@
 #include "if_em.h"
 
 /*********************************************************************
+ *  Set this to one to display debug statistics
+ *********************************************************************/
+int	em_display_debug_stats = 0;
+
+/*********************************************************************
  *  Driver version:
 *********************************************************************/
 char em_driver_version[] = "7.6.1-k";
@@ -259,12 +264,12 @@
 static void	em_update_stats_counters(struct adapter *);
 static void	em_add_hw_stats(struct adapter *adapter);
 static void	em_txeof(struct tx_ring *);
-static bool	em_rxeof(struct rx_ring *, int, int *);
+static bool	em_rxeof(struct em_queue *, int, int *);
 #ifndef __NO_STRICT_ALIGNMENT
 static int	em_fixup_rx(struct rx_ring *);
 #endif
 static void	em_setup_rxdesc(union e1000_rx_desc_extended *,
-		    const struct em_rxbuffer *rxbuf);
+		    const struct em_buffer *rxbuf);
 static void	em_receive_checksum(uint32_t status, struct mbuf *);
 static void	em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int,
 		    struct ip *, u32 *, u32 *);
@@ -304,17 +309,11 @@
 static int	em_irq_fast(void *);
 
 /* MSIX handlers */
-static void	em_msix_tx(void *);
-static void	em_msix_rx(void *);
+static void	em_msix_que(void *);
 static void	em_msix_link(void *);
-static void	em_handle_tx(void *context, int pending);
-static void	em_handle_rx(void *context, int pending);
+static void	em_handle_que(void *context, int pending);
 static void	em_handle_link(void *context, int pending);
 
-#ifdef EM_MULTIQUEUE
-static void	em_enable_vectors_82574(struct adapter *);
-#endif
-
 static void	em_set_sysctl_value(struct adapter *, const char *,
 		    const char *, int *, int);
 static int	em_set_flowcntl(SYSCTL_HANDLER_ARGS);
@@ -1031,7 +1030,8 @@
 em_mq_start(if_t ifp, struct mbuf *m)
 {
 	struct adapter	*adapter = if_getsoftc(ifp);
-	struct tx_ring	*txr = adapter->tx_rings;
+	struct tx_ring	*txr;
+	struct em_queue	*que;
 	unsigned int	i, error;
 
 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
@@ -1040,16 +1040,30 @@
 		i = curcpu % adapter->num_queues;
 
 	txr = &adapter->tx_rings[i];
+	que = &adapter->queues[i];
 
 	error = drbr_enqueue(ifp, txr->br, m);
 	if (error)
 		return (error);
 
+#ifdef EM_MULTIQUEUE
+	/*
+	 * There is no reason to ever defer processing that I'm
+	 * able to find. Just do it. Deferring needs to be redesigned
+	 * in this driver or something to keep from sending out of order
	 * packets.
+	 */
+	EM_TX_LOCK(txr);
+	em_mq_start_locked(ifp, txr);
+	EM_TX_UNLOCK(txr);
+#else
 	if (EM_TX_TRYLOCK(txr)) {
 		em_mq_start_locked(ifp, txr);
 		EM_TX_UNLOCK(txr);
-	} else
-		taskqueue_enqueue(txr->tq, &txr->tx_task);
+	} else {
+		taskqueue_enqueue(que->tq, &que->que_task);
+	}
+#endif
 
 	return (0);
 }
@@ -1548,6 +1562,7 @@
 em_irq_fast(void *arg)
 {
 	struct adapter	*adapter = arg;
+	struct em_queue	*que = adapter->queues;
 	if_t ifp;
 	u32		reg_icr;
 
@@ -1572,7 +1587,7 @@
 		return FILTER_STRAY;
 
 	em_disable_intr(adapter);
-	taskqueue_enqueue(adapter->tq, &adapter->que_task);
+	taskqueue_enqueue(que->tq, &que->que_task);
 
 	/* Link status change */
 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
@@ -1589,13 +1604,16 @@
 static void
 em_handle_que(void *context, int pending)
 {
-	struct adapter	*adapter = context;
+	struct em_queue	*que = context;
+	struct adapter	*adapter = que->adapter;
+	struct tx_ring	*txr = que->txr;
 	if_t ifp = adapter->ifp;
-	struct tx_ring	*txr = adapter->tx_rings;
-	struct rx_ring	*rxr = adapter->rx_rings;
 
 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
-		bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
+		bool more = em_rxeof(que, adapter->rx_process_limit, NULL);
+
+		while (more)
+			more = em_rxeof(que, adapter->rx_process_limit, NULL);
 
 		EM_TX_LOCK(txr);
 		em_txeof(txr);
@@ -1606,14 +1624,12 @@
 		if (!if_sendq_empty(ifp))
 			em_start_locked(ifp, txr);
 #endif
+		/* Re-enable this interrupt */
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, que->ims);
 		EM_TX_UNLOCK(txr);
-		if (more) {
-			taskqueue_enqueue(adapter->tq, &adapter->que_task);
-			return;
-		}
 	}
 
+	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
-	em_enable_intr(adapter);
 	return;
 }
@@ -1624,13 +1640,22 @@
  *
  **********************************************************************/
 static void
-em_msix_tx(void *arg)
+em_msix_que(void *arg)
 {
-	struct tx_ring *txr = arg;
-	struct adapter *adapter = txr->adapter;
+	struct em_queue *que = arg;
+	struct adapter *adapter = que->adapter;
+	struct tx_ring *txr = que->txr;
+	bool more = em_rxeof(que, adapter->rx_process_limit, NULL);
 	if_t ifp = adapter->ifp;
 
-	++txr->tx_irq;
+	if (!(if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING))
+		return;
+
+	++que->irqs;
+
+	while (more)
+		more = em_rxeof(que, adapter->rx_process_limit, NULL);
+
 	EM_TX_LOCK(txr);
 	em_txeof(txr);
 #ifdef EM_MULTIQUEUE
@@ -1640,36 +1665,11 @@
 	if (!if_sendq_empty(ifp))
 		em_start_locked(ifp, txr);
 #endif
-	/* Reenable this interrupt */
-	E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
+	E1000_WRITE_REG(&adapter->hw, E1000_IMS, que->ims);
+	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
 	EM_TX_UNLOCK(txr);
-	return;
-}
-
-/*********************************************************************
- *
- *  MSIX RX Interrupt Service routine
- *
- **********************************************************************/
-
-static void
-em_msix_rx(void *arg)
-{
-	struct rx_ring	*rxr = arg;
-	struct adapter	*adapter = rxr->adapter;
-	bool		more;
-
-	++rxr->rx_irq;
-	if (!(if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING))
-		return;
-	more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
-	if (more)
-		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
-	else {
-		/* Reenable this interrupt */
-		E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
-	}
 	return;
 }
@@ -1704,48 +1704,12 @@
 	 */
 	if (reg_icr) {
 		E1000_WRITE_REG(&adapter->hw,
-		    E1000_ICS, adapter->ims);
+		    E1000_ICS, adapter->que_mask);
 	}
 	return;
 }
 
 static void
-em_handle_rx(void *context, int pending)
-{
-	struct rx_ring	*rxr = context;
-	struct adapter	*adapter = rxr->adapter;
-	bool		more;
-
-	more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
-	if (more)
-		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
-	else {
-		/* Reenable this interrupt */
-		E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
-	}
-}
-
-static void
-em_handle_tx(void *context, int pending)
-{
-	struct tx_ring	*txr = context;
-	struct adapter	*adapter = txr->adapter;
-	if_t ifp = adapter->ifp;
-
-	EM_TX_LOCK(txr);
-	em_txeof(txr);
-#ifdef EM_MULTIQUEUE
-	if (!drbr_empty(ifp, txr->br))
-		em_mq_start_locked(ifp, txr);
-#else
-	if (!if_sendq_empty(ifp))
-		em_start_locked(ifp, txr);
-#endif
-	E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
-	EM_TX_UNLOCK(txr);
-}
-
-static void
 em_handle_link(void *context, int pending)
 {
 	struct adapter	*adapter = context;
@@ -1900,7 +1864,7 @@
 	struct adapter		*adapter = txr->adapter;
 	bus_dma_segment_t	segs[EM_MAX_SCATTER];
 	bus_dmamap_t		map;
-	struct em_txbuffer	*tx_buffer, *tx_buffer_mapped;
+	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
 	struct e1000_tx_desc	*ctxd = NULL;
 	struct mbuf		*m_head;
 	struct ether_header	*eh;
@@ -2312,8 +2276,8 @@
 	struct adapter	*adapter = arg;
 	if_t ifp = adapter->ifp;
 	struct tx_ring	*txr = adapter->tx_rings;
-	struct rx_ring	*rxr = adapter->rx_rings;
-	u32		trigger = 0;
+	struct em_queue	*que = adapter->queues;
+	u32		trigger;
 
 	EM_CORE_LOCK_ASSERT(adapter);
 
@@ -2326,11 +2290,9 @@
 	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
 
 	/* Mask to use in the irq trigger */
-	if (adapter->msix_mem) {
-		for (int i = 0; i < adapter->num_queues; i++, rxr++)
-			trigger |= rxr->ims;
-		rxr = adapter->rx_rings;
-	} else
+	if (adapter->msix_mem)
+		trigger = adapter->que_mask;
+	else
 		trigger = E1000_ICS_RXDMT0;
 
 	/*
@@ -2345,7 +2307,7 @@
 		txr->busy = EM_TX_HUNG;
 		/* Schedule a TX tasklet if needed */
 		if (txr->tx_avail <= EM_MAX_SCATTER)
-			taskqueue_enqueue(txr->tq, &txr->tx_task);
+			taskqueue_enqueue(que->tq, &que->que_task);
 	}
 
 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
@@ -2546,7 +2508,7 @@
 em_allocate_legacy(struct adapter *adapter)
 {
 	device_t dev = adapter->dev;
-	struct tx_ring	*txr = adapter->tx_rings;
+	struct em_queue	*que = adapter->queues;
 	int error, rid = 0;
 
 	/* Manually turn off all interrupts */
@@ -2567,17 +2529,11 @@
 	 * Allocate a fast interrupt and the associated
	 * deferred processing contexts.
	 */
-	TASK_INIT(&adapter->que_task, 0, em_handle_que, adapter);
+	TASK_INIT(&adapter->que_task, 0, em_handle_que, que);
 	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
 	    taskqueue_thread_enqueue, &adapter->tq);
 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s que",
 	    device_get_nameunit(adapter->dev));
-	/* Use a TX only tasklet for local timer */
-	TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
-	txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
-	    taskqueue_thread_enqueue, &txr->tq);
-	taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
-	    device_get_nameunit(adapter->dev));
 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
 	if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET,
 	    em_irq_fast, NULL, adapter, &adapter->tag)) != 0) {
@@ -2603,8 +2559,7 @@
 em_allocate_msix(struct adapter *adapter)
 {
 	device_t dev = adapter->dev;
-	struct tx_ring *txr = adapter->tx_rings;
-	struct rx_ring *rxr = adapter->rx_rings;
+	struct em_queue *que = adapter->queues;
 	int error, rid, vector = 0;
 	int cpu_id = 0;
 
@@ -2613,94 +2568,51 @@
 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
 
 	/* First set up ring resources */
-	for (int i = 0; i < adapter->num_queues; i++, rxr++, vector++) {
+	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
 
 		/* RX ring */
 		rid = vector + 1;
 
-		rxr->res = bus_alloc_resource_any(dev,
-		    SYS_RES_IRQ, &rid, RF_ACTIVE);
-		if (rxr->res == NULL) {
+		que->res = bus_alloc_resource_any(dev,
+		    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+		if (que->res == NULL) {
 			device_printf(dev,
 			    "Unable to allocate bus resource: "
-			    "RX MSIX Interrupt %d\n", i);
+			    "MSIX QUEUE Interrupt %d\n", i);
 			return (ENXIO);
 		}
-		if ((error = bus_setup_intr(dev, rxr->res,
-		    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx,
-		    rxr, &rxr->tag)) != 0) {
-			device_printf(dev, "Failed to register RX handler");
+		if ((error = bus_setup_intr(dev, que->res,
+		    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_que,
+		    que, &que->tag)) != 0) {
+			device_printf(dev, "Failed to register QUEUE handler");
 			return (error);
 		}
 #if __FreeBSD_version >= 800504
-		bus_describe_intr(dev, rxr->res, rxr->tag, "rx%d", i);
+		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
 #endif
-		rxr->msix = vector;
+		que->msix = vector;
 
 		if (em_last_bind_cpu < 0)
 			em_last_bind_cpu = CPU_FIRST();
 		cpu_id = em_last_bind_cpu;
-		bus_bind_intr(dev, rxr->res, cpu_id);
+		bus_bind_intr(dev, que->res, cpu_id);
 
-		TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr);
-		rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT,
-		    taskqueue_thread_enqueue, &rxr->tq);
-		taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq (cpuid %d)",
+		TASK_INIT(&que->que_task, 0, em_handle_que, que);
+		que->tq = taskqueue_create_fast("em_queue", M_NOWAIT,
+		    taskqueue_thread_enqueue, &que->tq);
+		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que (qid %d)",
 		    device_get_nameunit(adapter->dev), cpu_id);
 		/*
 		** Set the bit to enable interrupt
 		** in E1000_IMS -- bits 20 and 21
-		** are for RX0 and RX1, note this has
+		** are for RX0 and RX1, bits 22 and 23
+		** are for TX0 and TX1. note this has
 		** NOTHING to do with the MSIX vector
 		*/
-		rxr->ims = 1 << (20 + i);
-		adapter->ims |= rxr->ims;
-		adapter->ivars |= (8 | rxr->msix) << (i * 4);
-
-		em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu);
-	}
-
-	for (int i = 0; i < adapter->num_queues; i++, txr++, vector++) {
-		/* TX ring */
-		rid = vector + 1;
-		txr->res = bus_alloc_resource_any(dev,
-		    SYS_RES_IRQ, &rid, RF_ACTIVE);
-		if (txr->res == NULL) {
-			device_printf(dev,
-			    "Unable to allocate bus resource: "
-			    "TX MSIX Interrupt %d\n", i);
-			return (ENXIO);
-		}
-		if ((error = bus_setup_intr(dev, txr->res,
-		    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx,
-		    txr, &txr->tag)) != 0) {
-			device_printf(dev, "Failed to register TX handler");
-			return (error);
-		}
-#if __FreeBSD_version >= 800504
-		bus_describe_intr(dev, txr->res, txr->tag, "tx%d", i);
-#endif
-		txr->msix = vector;
-
-		if (em_last_bind_cpu < 0)
-			em_last_bind_cpu = CPU_FIRST();
-		cpu_id = em_last_bind_cpu;
-		bus_bind_intr(dev, txr->res, cpu_id);
-
-		TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
-		txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
-		    taskqueue_thread_enqueue, &txr->tq);
-		taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq (cpuid %d)",
-		    device_get_nameunit(adapter->dev), cpu_id);
-		/*
-		** Set the bit to enable interrupt
-		** in E1000_IMS -- bits 22 and 23
-		** are for TX0 and TX1, note this has
-		** NOTHING to do with the MSIX vector
-		*/
-		txr->ims = 1 << (22 + i);
-		adapter->ims |= txr->ims;
-		adapter->ivars |= (8 | txr->msix) << (8 + (i * 4));
+		que->ims = (1 << (20 + i)) | (1 << (22 + i));
+		adapter->ivars |= (8 | que->msix) << (i * 4);
+		adapter->ivars |= (8 | que->msix) << (8 + (i * 4));
+		adapter->que_mask |= que->ims;
 
 		em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu);
 	}
 
@@ -2737,41 +2649,23 @@
 static void
 em_free_pci_resources(struct adapter *adapter)
 {
+	struct em_queue	*que = adapter->queues;
 	device_t	dev = adapter->dev;
-	struct tx_ring	*txr;
-	struct rx_ring	*rxr;
 	int		rid;
 
 	/*
 	** Release all the queue interrupt resources:
 	*/
-	for (int i = 0; i < adapter->num_queues; i++) {
-		txr = &adapter->tx_rings[i];
-		/* an early abort? */
-		if (txr == NULL)
-			break;
-		rid = txr->msix +1;
-		if (txr->tag != NULL) {
-			bus_teardown_intr(dev, txr->res, txr->tag);
-			txr->tag = NULL;
+	for (int i = 0; i < adapter->num_queues; i++, que++) {
+		rid = que->msix +1;
+		if (que->tag != NULL) {
+			bus_teardown_intr(dev, que->res, que->tag);
+			que->tag = NULL;
 		}
-		if (txr->res != NULL)
+		if (que->res != NULL)
 			bus_release_resource(dev, SYS_RES_IRQ,
-			    rid, txr->res);
-
-		rxr = &adapter->rx_rings[i];
-		/* an early abort? */
-		if (rxr == NULL)
-			break;
-		rid = rxr->msix +1;
-		if (rxr->tag != NULL) {
-			bus_teardown_intr(dev, rxr->res, rxr->tag);
-			rxr->tag = NULL;
-		}
-		if (rxr->res != NULL)
-			bus_release_resource(dev, SYS_RES_IRQ,
-			    rid, rxr->res);
+			    rid, que->res);
 	}
 
 	if (adapter->linkvec) /* we are doing MSIX */
@@ -2819,12 +2713,9 @@
 	/*
 	** Try using MSI-X for Hartwell adapters
 	*/
-	if ((adapter->hw.mac.type == e1000_82574) &&
-	    (em_enable_msix == TRUE)) {
+	if (em_enable_msix == TRUE) {
 #ifdef EM_MULTIQUEUE
 		adapter->num_queues = (em_num_queues == 1) ? 1 : 2;
-		if (adapter->num_queues > 1)
-			em_enable_vectors_82574(adapter);
 #endif
 		/* Map the MSIX BAR */
 		int rid = PCIR_BAR(EM_MSIX_BAR);
@@ -2839,10 +2730,10 @@
 		val = pci_msix_count(dev);
 
 #ifdef EM_MULTIQUEUE
-		/* We need 5 vectors in the multiqueue case */
-		if (adapter->num_queues > 1 ) {
-			if (val >= 5)
-				val = 5;
+		/* We need 3 vectors in the multiqueue case */
+		if (adapter->num_queues > 1) {
+			if (val >= 3)
+				val = 3;
 			else {
 				adapter->num_queues = 1;
 				device_printf(adapter->dev,
@@ -3379,19 +3270,29 @@
 em_allocate_queues(struct adapter *adapter)
 {
 	device_t dev = adapter->dev;
+	struct em_queue	*que = NULL;
 	struct tx_ring *txr = NULL;
 	struct rx_ring *rxr = NULL;
 	int rsize, tsize, error = E1000_SUCCESS;
 	int txconf = 0, rxconf = 0;
 
+	/* First allocate the top level queue structs */
+	if (!(adapter->queues =
+	    (struct em_queue *) malloc(sizeof(struct em_queue) *
+	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate queue memory\n");
+		error = ENOMEM;
+		goto fail;
+	}
+
 	/* Allocate the TX ring struct memory */
 	if (!(adapter->tx_rings =
 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 		device_printf(dev, "Unable to allocate TX ring memory\n");
 		error = ENOMEM;
-		goto fail;
+		goto tx_fail;
 	}
 
 	/* Now allocate the RX */
@@ -3478,6 +3379,16 @@
 		}
 	}
 
+	/*
+	** Finally set up the queue holding structs
+	*/
+	for (int i = 0; i < adapter->num_queues; i++) {
+		que = &adapter->queues[i];
+		que->adapter = adapter;
+		que->txr = &adapter->tx_rings[i];
+		que->rxr = &adapter->rx_rings[i];
+	}
+
 	return (0);
 
 err_rx_desc:
@@ -3492,6 +3403,8 @@
 		buf_ring_free(txr->br, M_DEVBUF);
 #endif
 	free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+	free(adapter->queues, M_DEVBUF);
 fail:
 	return (error);
 }
@@ -3509,7 +3422,7 @@
 {
 	struct adapter *adapter = txr->adapter;
 	device_t dev = adapter->dev;
-	struct em_txbuffer *txbuf;
+	struct em_buffer *txbuf;
 	int error, i;
 
 	/*
@@ -3532,7 +3445,7 @@
 	}
 
 	if (!(txr->tx_buffers =
-	    (struct em_txbuffer *) malloc(sizeof(struct em_txbuffer) *
+	    (struct em_buffer *) malloc(sizeof(struct em_buffer) *
 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
 		error = ENOMEM;
@@ -3565,7 +3478,7 @@
 em_setup_transmit_ring(struct tx_ring *txr)
 {
 	struct adapter *adapter = txr->adapter;
-	struct em_txbuffer *txbuf;
+	struct em_buffer *txbuf;
 	int i;
 #ifdef DEV_NETMAP
 	struct netmap_slot *slot;
@@ -3793,7 +3706,7 @@
 em_free_transmit_buffers(struct tx_ring *txr)
 {
 	struct adapter *adapter = txr->adapter;
-	struct em_txbuffer *txbuf;
+	struct em_buffer *txbuf;
 
 	INIT_DEBUGOUT("free_transmit_ring: begin");
 
@@ -3860,7 +3773,7 @@
 {
 	struct adapter *adapter = txr->adapter;
 	struct e1000_context_desc *TXD = NULL;
-	struct em_txbuffer *tx_buffer;
+	struct em_buffer *tx_buffer;
 	int cur, hdr_len;
 	u32 cmd = 0;
 	u16 offload = 0;
@@ -4015,7 +3928,7 @@
 {
 	struct adapter *adapter = txr->adapter;
 	struct e1000_context_desc *TXD;
-	struct em_txbuffer *tx_buffer;
+	struct em_buffer *tx_buffer;
 	int cur, hdr_len;
 
 	/*
@@ -4093,7 +4006,7 @@
 {
 	struct adapter	*adapter = txr->adapter;
 	int first, last, done, processed;
-	struct em_txbuffer *tx_buffer;
+	struct em_buffer *tx_buffer;
 	struct e1000_tx_desc	*tx_desc, *eop_desc;
 	if_t ifp = adapter->ifp;
 
@@ -4210,7 +4123,7 @@
 	struct adapter		*adapter = rxr->adapter;
 	struct mbuf		*m;
 	bus_dma_segment_t	segs;
-	struct em_rxbuffer	*rxbuf;
+	struct em_buffer	*rxbuf;
 	int			i, j, error, nsegs;
 	bool			cleaned = FALSE;
 
@@ -4292,10 +4205,10 @@
 {
 	struct adapter		*adapter = rxr->adapter;
 	device_t		dev = adapter->dev;
-	struct em_rxbuffer	*rxbuf;
+	struct em_buffer	*rxbuf;
 	int			error;
 
-	rxr->rx_buffers = malloc(sizeof(struct em_rxbuffer) *
+	rxr->rx_buffers = malloc(sizeof(struct em_buffer) *
 	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
 	if (rxr->rx_buffers == NULL) {
 		device_printf(dev, "Unable to allocate rx_buffer memory\n");
@@ -4348,7 +4261,7 @@
 em_setup_receive_ring(struct rx_ring *rxr)
 {
 	struct adapter		*adapter = rxr->adapter;
-	struct em_rxbuffer	*rxbuf;
+	struct em_buffer	*rxbuf;
 	bus_dma_segment_t	seg[1];
 	int			rsize, nsegs, error = 0;
 #ifdef DEV_NETMAP
@@ -4455,7 +4368,7 @@
 	for (int i = 0; i < q; ++i) {
 		rxr = &adapter->rx_rings[i];
 		for (int n = 0; n < adapter->num_rx_desc; n++) {
-			struct em_rxbuffer *rxbuf;
+			struct em_buffer *rxbuf;
 			rxbuf = &rxr->rx_buffers[n];
 			if (rxbuf->m_head != NULL) {
 				bus_dmamap_sync(rxr->rxtag, rxbuf->map,
@@ -4502,7 +4415,7 @@
 em_free_receive_buffers(struct rx_ring *rxr)
 {
 	struct adapter	*adapter = rxr->adapter;
-	struct em_rxbuffer	*rxbuf = NULL;
+	struct em_buffer	*rxbuf = NULL;
 
 	INIT_DEBUGOUT("free_receive_buffers: begin");
 
@@ -4544,7 +4457,7 @@
 static void
 em_initialize_receive_unit(struct adapter *adapter)
 {
-	struct rx_ring	*rxr = adapter->rx_rings;
+	struct rx_ring *rxr = adapter->rx_rings;
 	if_t ifp = adapter->ifp;
 	struct e1000_hw	*hw = &adapter->hw;
 	u32 rctl, rxcsum, rfctl;
@@ -4758,9 +4671,10 @@
  *  For polling we also now return the number of cleaned packets
 *********************************************************************/
 static bool
-em_rxeof(struct rx_ring *rxr, int count, int *done)
+em_rxeof(struct em_queue *que, int count, int *done)
 {
-	struct adapter		*adapter = rxr->adapter;
+	struct adapter		*adapter = que->adapter;
+	struct rx_ring		*rxr = que->rxr;
 	if_t ifp = adapter->ifp;
 	struct mbuf		*mp, *sendmp;
 	u32			status = 0;
@@ -4896,7 +4810,7 @@
 static __inline void
 em_rx_discard(struct rx_ring *rxr, int i)
 {
-	struct em_rxbuffer	*rbuf;
+	struct em_buffer	*rbuf;
 
 	rbuf = &rxr->rx_buffers[i];
 	bus_dmamap_unload(rxr->rxtag, rbuf->map);
@@ -4969,7 +4883,7 @@
 #endif
 
 static void
-em_setup_rxdesc(union e1000_rx_desc_extended *rxd, const struct em_rxbuffer *rxbuf)
+em_setup_rxdesc(union e1000_rx_desc_extended *rxd, const struct em_buffer *rxbuf)
 {
 	rxd->read.buffer_addr = htole64(rxbuf->paddr);
 	/* DD bits must be cleared */
@@ -5106,8 +5020,8 @@
 	u32 ims_mask = IMS_ENABLE_MASK;
 
 	if (hw->mac.type == e1000_82574) {
-		E1000_WRITE_REG(hw, EM_EIAC, adapter->ims);
-		ims_mask |= adapter->ims;
+		E1000_WRITE_REG(hw, EM_EIAC, adapter->que_mask);
+		ims_mask |= adapter->que_mask;
 	}
 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
 }
@@ -5639,6 +5553,7 @@
 
 	struct tx_ring *txr = adapter->tx_rings;
 	struct rx_ring *rxr = adapter->rx_rings;
+	struct em_queue *que = adapter->queues;
 
 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
@@ -5686,7 +5601,7 @@
 	    CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
 	    "Flow Control Low Watermark");
 
-	for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
+	for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++, que++) {
 		snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
 		    CTLFLAG_RD, NULL, "TX Queue Name");
@@ -5702,8 +5617,8 @@
 		    E1000_TDT(txr->me), em_sysctl_reg_handler, "IU",
 		    "Transmit Descriptor Tail");
-		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
-		    CTLFLAG_RD, &txr->tx_irq,
+		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "queue_irq",
+		    CTLFLAG_RD, &que->irqs,
 		    "Queue MSI-X Transmit Interrupts");
 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail",
 		    CTLFLAG_RD, &txr->no_desc_avail,
@@ -5724,9 +5639,6 @@
 		    E1000_RDT(rxr->me), em_sysctl_reg_handler, "IU",
 		    "Receive Descriptor Tail");
-		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
-		    CTLFLAG_RD, &rxr->rx_irq,
-		    "Queue MSI-X Receive Interrupts");
 	}
 
 	/* MAC stats get their own sub node */
@@ -6147,7 +6059,7 @@
 	else
 		printf("and ACTIVE\n");
 
-	for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
+	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
 		device_printf(dev, "TX Queue %d ------\n", i);
 		device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
 		    E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
@@ -6168,33 +6080,6 @@
 	}
 }
 
-#ifdef EM_MULTIQUEUE
-/*
- * 82574 only:
- * Write a new value to the EEPROM increasing the number of MSIX
- * vectors from 3 to 5, for proper multiqueue support.
- */
-static void
-em_enable_vectors_82574(struct adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	device_t dev = adapter->dev;
-	u16 edata;
-
-	e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
-	printf("Current cap: %#06x\n", edata);
-	if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) {
-		device_printf(dev, "Writing to eeprom: increasing "
-		    "reported MSIX vectors from 3 to 5...\n");
-		edata &= ~(EM_NVM_MSIX_N_MASK);
-		edata |= 4 << EM_NVM_MSIX_N_SHIFT;
-		e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
-		e1000_update_nvm_checksum(hw);
-		device_printf(dev, "Writing to eeprom: done\n");
-	}
-}
-#endif
-
 #ifdef DDB
 DB_COMMAND(em_reset_dev, em_ddb_reset_dev)
 {
Index: sys/dev/netmap/if_em_netmap.h
===================================================================
--- sys/dev/netmap/if_em_netmap.h
+++ sys/dev/netmap/if_em_netmap.h
@@ -45,14 +45,11 @@
 {
 	if (adapter->msix > 1) { /* MSIX */
 		int i;
-		struct tx_ring *txr = adapter->tx_rings;
-		struct rx_ring *rxr = adapter->rx_rings;
+		struct em_queue *que = adapter->queues;
 
-		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
-			taskqueue_block(txr->tq);
-			taskqueue_drain(txr->tq, &txr->tx_task);
-			taskqueue_block(rxr->tq);
-			taskqueue_drain(rxr->tq, &rxr->rx_task);
+		for (i = 0; i < adapter->num_queues; i++, que++) {
+			taskqueue_block(que->tq);
+			taskqueue_drain(que->tq, &que->que_task);
 		}
 	} else {	/* legacy */
 		taskqueue_block(adapter->tq);
@@ -66,13 +63,11 @@
 em_netmap_unblock_tasks(struct adapter *adapter)
 {
 	if (adapter->msix > 1) {
-		struct tx_ring *txr = adapter->tx_rings;
-		struct rx_ring *rxr = adapter->rx_rings;
+		struct em_queue *que = adapter->queues;
 		int i;
 
-		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
-			taskqueue_unblock(txr->tq);
-			taskqueue_unblock(rxr->tq);
+		for (i = 0; i < adapter->num_queues; i++, que++) {
+			taskqueue_unblock(que->tq);
 		}
 	} else {	/* legacy */
 		taskqueue_unblock(adapter->tq);
@@ -148,7 +143,7 @@
 
 			/* device-specific */
 			struct e1000_tx_desc *curr = &txr->tx_base[nic_i];
-			struct em_txbuffer *txbuf = &txr->tx_buffers[nic_i];
+			struct em_buffer *txbuf = &txr->tx_buffers[nic_i];
 			int flags = (slot->flags & NS_REPORT ||
 				nic_i == 0 || nic_i == report_frequency) ?
 				E1000_TXD_CMD_RS : 0;
@@ -242,7 +237,7 @@
 			union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
 			uint32_t staterr = le32toh(curr->wb.upper.status_error);
 
-			if ((staterr & E1000_RXD_STAT_DD) == 0)
+			if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0)
 				break;
 			ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
 			ring->slot[nm_i].flags = slot_flags;
@@ -272,7 +267,7 @@
 			void *addr = PNMB(na, slot, &paddr);
 			union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
-			struct em_rxbuffer *rxbuf = &rxr->rx_buffers[nic_i];
+			struct em_buffer *rxbuf = &rxr->rx_buffers[nic_i];
 
 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
 				goto ring_reset;
@@ -322,7 +317,8 @@
 	na.nm_txsync = em_netmap_txsync;
 	na.nm_rxsync = em_netmap_rxsync;
 	na.nm_register = em_netmap_reg;
-	na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
+	na.num_tx_rings = adapter->num_queues;
+	na.num_rx_rings = adapter->num_queues;
 	netmap_attach(&na);
 }