Index: head/sys/dev/sfxge/common/ef10_ev.c =================================================================== --- head/sys/dev/sfxge/common/ef10_ev.c (revision 340827) +++ head/sys/dev/sfxge/common/ef10_ev.c (revision 340828) @@ -1,1414 +1,1416 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_STATS #include "mcdi_mon.h" #endif #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ do { \ (_eep)->ee_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_EV_QSTAT_INCR(_eep, _stat) #endif /* * A non-interrupting event queue requires an interrupting event queue to * refer to for wake-up events, even if wake-ups are never used. * It may even be a non-allocated event queue.
*/ #define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0) static __checkReturn boolean_t ef10_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn efx_rc_t efx_mcdi_set_evq_tmr( __in efx_nic_t *enp, __in uint32_t instance, __in uint32_t mode, __in uint32_t timer_ns) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN, MC_CMD_SET_EVQ_TMR_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_EVQ_TMR; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN; MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) { rc = EMSGSIZE; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_init_evq( __in efx_nic_t *enp, __in unsigned int instance, __in efsys_mem_t *esmp, __in size_t nevs, __in uint32_t irq, __in uint32_t us, __in uint32_t flags, __in boolean_t low_latency) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), MC_CMD_INIT_EVQ_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; boolean_t interrupting; int ev_cut_through; efx_rc_t rc; npages = EFX_EVQ_NBUFS(nevs); if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq); interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); /* * On Huntington RX and TX event batching can only be requested together * (even if the datapath firmware doesn't actually support RX * batching). If event cut through is enabled no RX batching will occur. * * So always enable RX and TX event batching, and enable event cut * through if we want low latency operation. */ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { case EFX_EVQ_FLAGS_TYPE_AUTO: ev_cut_through = low_latency ? 
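/* Cut-through is what actually buys latency here: as the comment above notes, no RX event batching occurs once event cut through is enabled, so only the low-latency case sets it. */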
1 : 0; break; case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: ev_cut_through = 0; break; case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: ev_cut_through = 1; break; default: rc = EINVAL; goto fail2; } MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS, INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting, INIT_EVQ_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_IN_FLAG_INT_ARMD, 0, INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through, INIT_EVQ_IN_FLAG_RX_MERGE, 1, INIT_EVQ_IN_FLAG_TX_MERGE, 1); /* If the value is zero then disable the timer */ if (us == 0) { MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail3; MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks); } MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE, MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail4; } if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { rc = EMSGSIZE; goto fail5; } /* NOTE: ignore the returned IRQ param as firmware does not set it. */ return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_init_evq_v2( __in efx_nic_t *enp, __in unsigned int instance, __in efsys_mem_t *esmp, __in size_t nevs, __in uint32_t irq, __in uint32_t us, __in uint32_t flags) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), MC_CMD_INIT_EVQ_V2_OUT_LEN)]; boolean_t interrupting; unsigned int evq_type; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; efx_rc_t rc; npages = EFX_EVQ_NBUFS(nevs); if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { case EFX_EVQ_FLAGS_TYPE_AUTO: evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO; break; case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT; break; case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY; break; default: rc = EINVAL; goto fail2; } MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS, INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting, INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, INIT_EVQ_V2_IN_FLAG_TYPE, evq_type); /* If the value is zero then disable the timer */ if (us == 0) { MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 
MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail3; MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); } MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail4; } if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { rc = EMSGSIZE; goto fail5; } /* NOTE: ignore the returned IRQ param as firmware does not set it. */ EFSYS_PROBE1(mcdi_evq_flags, uint32_t, MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fini_evq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN, MC_CMD_FINI_EVQ_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the EVQ has already been destroyed. 
*/ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_ev_init( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) return (0); } void ef10_ev_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } __checkReturn efx_rc_t ef10_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __in efx_evq_t *eep) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t irq; efx_rc_t rc; _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail2; } if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail3; } /* Set up the handler table */ eep->ee_rx = ef10_ev_rx; eep->ee_tx = ef10_ev_tx; eep->ee_driver = ef10_ev_driver; eep->ee_drv_gen = ef10_ev_drv_gen; eep->ee_mcdi = ef10_ev_mcdi; /* Set up the event queue */ /* INIT_EVQ expects function-relative vector number */ if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) { irq = index; } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) { irq = index; flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) | EFX_EVQ_FLAGS_NOTIFY_INTERRUPT; } else { irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX; } /* * Interrupts may be raised for events immediately after the queue is * created. See bug58606. */ if (encp->enc_init_evq_v2_supported) { /* * On Medford the low latency license is required to enable RX * and event cut through and to disable RX batching. If the * event queue type in flags is auto, we let the firmware decide * the settings to use. If the adapter has a low latency * license, it will choose the best settings for low latency, * otherwise it will choose the best settings for throughput. */ rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags); if (rc != 0) goto fail4; } else { /* * On Huntington we need to specify the settings to use. * If the event queue type in flags is auto, we favour throughput * if the adapter is running virtualization-supporting firmware * (i.e. the full-featured firmware variant) * and latency otherwise. The Ethernet Virtual Bridging * capability is used to make this decision. (Note though that * the low latency firmware variant is also best for * throughput and the corresponding type should be specified * to choose it.) */ boolean_t low_latency = encp->enc_datapath_cap_evb ?
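/* EVB capability implies the full-featured (virtualization) firmware variant, so favour throughput (0); otherwise favour latency (1), per the comment above */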
0 : 1; rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags, low_latency); if (rc != 0) goto fail5; } return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index); } __checkReturn efx_rc_t ef10_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; rptr = count & eep->ee_mask; if (enp->en_nic_cfg.enc_bug35388_workaround) { EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS > (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS < (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, ERF_DD_EVQ_IND_RPTR, (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, ERF_DD_EVQ_IND_RPTR, rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); } else { EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); } return (0); } static __checkReturn efx_rc_t efx_mcdi_driver_event( __in efx_nic_t *enp, __in uint32_t evq, __in efx_qword_t data) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN, MC_CMD_DRIVER_EVENT_OUT_LEN)]; efx_rc_t rc; req.emr_cmd = MC_CMD_DRIVER_EVENT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN; MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO, EFX_QWORD_FIELD(data, EFX_DWORD_0)); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI, EFX_QWORD_FIELD(data, EFX_DWORD_1)); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t event; EFX_POPULATE_QWORD_3(event, ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV, ESF_DZ_DRV_SUB_CODE, 0, ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data); (void) efx_mcdi_driver_event(enp, eep->ee_index, event); } __checkReturn efx_rc_t ef10_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_dword_t dword; uint32_t mode; efx_rc_t rc; /* Check that hardware and MCDI use the same timer MODE values */ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF); if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { mode = FFE_CZ_TIMER_MODE_DIS; } else { mode = FFE_CZ_TIMER_MODE_INT_HLDOFF; } if (encp->enc_bug61265_workaround) { uint32_t ns = us * 1000; rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, 
mode, ns); if (rc != 0) goto fail2; } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail3; if (encp->enc_bug35388_workaround) { EFX_POPULATE_DWORD_3(dword, ERF_DD_EVQ_IND_TIMER_FLAGS, EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, ERF_DD_EVQ_IND_TIMER_VAL, ticks); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, 0); } else { EFX_POPULATE_DWORD_2(dword, ERF_DZ_TC_TIMER_MODE, mode, ERF_DZ_TC_TIMER_VAL, ticks); EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG, eep->ee_index, &dword, 0); } } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_QSTATS void ef10_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < EV_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, eep->ee_stat[id]); eep->ee_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_RX_PACKED_STREAM static __checkReturn boolean_t ef10_ev_rx_packed_stream( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t label; uint32_t pkt_count_lbits; uint16_t flags; boolean_t should_abort; efx_evq_rxq_state_t *eersp; unsigned int pkt_count; unsigned int current_id; boolean_t new_buffer; pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE); flags = 0; eersp = &eep->ee_rxq_state[label]; /* * RX_DSC_PTR_LBITS has least significant bits of the global * (not per-buffer) packet counter. It is guaranteed that * maximum number of completed packets fits in lbits-mask. * So, modulo lbits-mask arithmetic should be used to calculate * packet counter increment. 
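 * For example, with a hypothetical 8-bit lbits field, a counter that advances from 250 to 5 gives (5 - 250) & 0xff == 11 newly completed packets despite the wrap (the field width here is illustrative only).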
*/ pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) & EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); eersp->eers_rx_stream_npackets += pkt_count; if (new_buffer) { flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER; eersp->eers_rx_packed_stream_credits++; eersp->eers_rx_read_ptr++; } current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { /* RX frame truncated (error flag is misnamed) */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { /* Bad Ethernet frame CRC */ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); deliver: /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); EFSYS_ASSERT(eecp->eec_rx_ps != NULL); should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count, flags); return (should_abort); } #endif /* EFSYS_OPT_RX_PACKED_STREAM */ static __checkReturn boolean_t ef10_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t size; uint32_t label; uint32_t mac_class; uint32_t eth_tag_class; uint32_t l3_class; uint32_t l4_class; uint32_t next_read_lbits; uint16_t flags; boolean_t cont; boolean_t should_abort; efx_evq_rxq_state_t *eersp; unsigned int desc_count; unsigned int last_used_id; EFX_EV_QSTAT_INCR(eep, EV_RX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); /* Basic packet information */ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); eersp = &eep->ee_rxq_state[label]; #if EFSYS_OPT_RX_PACKED_STREAM /* * Packed stream events are very different, * so handle them separately */ if (eersp->eers_rx_packed_stream) return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg)); #endif size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS); cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } flags = 0; if (cont != 0) { /* * This may be part of a scattered frame, or it may be a * truncated frame if scatter is disabled on this RXQ. * Overlength frames can be received if e.g. a VF is configured * for 1500 MTU but connected to a port set to 9000 MTU * (see bug56567). * FIXME: There is not yet any driver that supports scatter on * Huntington. Scatter support is required for OSX. */ flags |= EFX_PKT_CONT; } if (mac_class == ESE_DZ_MAC_CLASS_UCAST) flags |= EFX_PKT_UNICAST; /* Increment the count of descriptors read */ desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) & EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); eersp->eers_rx_read_ptr += desc_count; /* * FIXME: add error checking to make sure this is a batched event.
* This could also be an aborted scatter, see Bug36629. */ if (desc_count > 1) { EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); flags |= EFX_PKT_PREFIX_LEN; } /* Calculate the index of the last descriptor consumed */ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { /* RX frame truncated (error flag is misnamed) */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { /* Bad Ethernet frame CRC */ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { /* * Hardware parse failed, due to malformed headers * or headers that are too long for the parser. * Headers and checksums must be validated by the host. */ /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */ goto deliver; } if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) || (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) { flags |= EFX_PKT_VLAN_TAGGED; } switch (l3_class) { case ESE_DZ_L3_CLASS_IP4: case ESE_DZ_L3_CLASS_IP4_FRAG: flags |= EFX_PKT_IPV4; if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) { EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); } else { flags |= EFX_CKSUM_IPV4; } if (l4_class == ESE_DZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_TCP; } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_UDP; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); } break; case ESE_DZ_L3_CLASS_IP6: case ESE_DZ_L3_CLASS_IP6_FRAG: flags |= EFX_PKT_IPV6; if (l4_class == ESE_DZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_TCP; } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_UDP; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); } break; default: EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); break; } if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) { if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); } else { flags |= EFX_CKSUM_TCPUDP; } } deliver: /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags); return (should_abort); } static __checkReturn boolean_t ef10_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } static __checkReturn boolean_t ef10_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { unsigned int code; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; code = 
EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE); switch (code) { case ESE_DZ_DRV_TIMER_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID); EFSYS_ASSERT(eecp->eec_timer != NULL); should_abort = eecp->eec_timer(arg, id); break; } case ESE_DZ_DRV_WAKE_UP_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID); EFSYS_ASSERT(eecp->eec_wake_up != NULL); should_abort = eecp->eec_wake_up(arg, id); break; } case ESE_DZ_DRV_START_UP_EV: EFSYS_ASSERT(eecp->eec_initialized != NULL); should_abort = eecp->eec_initialized(arg); break; default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } static __checkReturn boolean_t ef10_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t data; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); should_abort = B_FALSE; data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0); if (data >= ((uint32_t)1 << 16)) { EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); return (B_TRUE); } EFSYS_ASSERT(eecp->eec_software != NULL); should_abort = eecp->eec_software(arg, (uint16_t)data); return (should_abort); } static __checkReturn boolean_t ef10_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; unsigned int code; boolean_t should_abort = B_FALSE; EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); switch (code) { case MCDI_EVENT_CODE_BADSSERT: efx_mcdi_ev_death(enp, EINTR); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(enp, MCDI_EV_FIELD(eqp, CMDDONE_SEQ), MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); break; #if EFSYS_OPT_MCDI_PROXY_AUTH case MCDI_EVENT_CODE_PROXY_RESPONSE: /* * This event notifies a function that an authorization request * has been processed. If the request was authorized then the * function can now re-send the original MCDI request. * See SF-113652-SW "SR-IOV Proxied Network Access Control". */ efx_mcdi_ev_proxy_response(enp, MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE), MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC)); break; #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ case MCDI_EVENT_CODE_LINKCHANGE: { efx_link_mode_t link_mode; ef10_phy_link_ev(enp, eqp, &link_mode); should_abort = eecp->eec_link_change(arg, link_mode); break; } case MCDI_EVENT_CODE_SENSOREVT: { #if EFSYS_OPT_MON_STATS efx_mon_stat_t id; efx_mon_stat_value_t value; efx_rc_t rc; /* Decode monitor stat for MCDI sensor (if supported) */ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) { /* Report monitor stat change */ should_abort = eecp->eec_monitor(arg, id, value); } else if (rc == ENOTSUP) { should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_SENSOREVT, MCDI_EV_FIELD(eqp, DATA)); } else { EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ } #endif break; } case MCDI_EVENT_CODE_SCHEDERR: /* Informational only */ break; case MCDI_EVENT_CODE_REBOOT: /* Falcon/Siena only (should not be seen with Huntington). */ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MC_REBOOT: /* MC_REBOOT event is used for Huntington (EF10) and later.
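 * Both reboot event codes receive the same handling: efx_mcdi_ev_death() marks the MCDI interface dead with EIO.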
*/ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif break; case MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } case MCDI_EVENT_CODE_TX_ERR: { /* * After a TXQ error is detected, firmware sends a TX_ERR event. * This may be followed by TX completions (which we discard), * and then finally by a TX_FLUSH event. Firmware destroys the * TXQ automatically after sending the TX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_TXQ_ERR; EFSYS_PROBE2(tx_descq_err, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR, MCDI_EV_FIELD(eqp, TX_ERR_DATA)); break; } case MCDI_EVENT_CODE_TX_FLUSH: { uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ); /* * EF10 firmware sends two TX_FLUSH events: one to the txq's * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with TX_FLUSH_TO_DRIVER. */ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case MCDI_EVENT_CODE_RX_ERR: { /* * After an RXQ error is detected, firmware sends an RX_ERR * event. This may be followed by RX events (which we discard), * and then finally by an RX_FLUSH event. Firmware destroys the * RXQ automatically after sending the RX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_RXQ_ERR; EFSYS_PROBE2(rx_descq_err, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR, MCDI_EV_FIELD(eqp, RX_ERR_DATA)); break; } case MCDI_EVENT_CODE_RX_FLUSH: { uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ); /* * EF10 firmware sends two RX_FLUSH events: one to the rxq's * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with RX_FLUSH_TO_DRIVER. 
*/ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); break; } default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } void ef10_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label, - __in boolean_t packed_stream) + __in efx_rxq_type_t type) { efx_evq_rxq_state_t *eersp; + boolean_t packed_stream = (type >= EFX_RXQ_TYPE_PACKED_STREAM_1M) && + (type <= EFX_RXQ_TYPE_PACKED_STREAM_64K); EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); #if EFSYS_OPT_RX_PACKED_STREAM /* * For packed stream modes, the very first event will have the * new buffer flag set, so the read pointer will be incremented, * yielding the correct pointer. That results in simpler code * than trying to detect a start-of-the-world condition in the * event handler. */ eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0; #else eersp->eers_rx_read_ptr = 0; #endif eersp->eers_rx_mask = erp->er_mask; #if EFSYS_OPT_RX_PACKED_STREAM eersp->eers_rx_stream_npackets = 0; eersp->eers_rx_packed_stream = packed_stream; if (packed_stream) { eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) / EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT, EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE); EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0); /* * A single credit is allocated to the queue when it is started. * It is immediately spent by the first packet, which has the * NEW BUFFER flag set, but it must still be taken into account * so as not to accidentally wrap around the maximum number of * credits. */ eersp->eers_rx_packed_stream_credits--; EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=, EFX_RX_PACKED_STREAM_MAX_CREDITS); } #else EFSYS_ASSERT(!packed_stream); #endif } void ef10_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label) { efx_evq_rxq_state_t *eersp; EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0); eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = 0; #if EFSYS_OPT_RX_PACKED_STREAM eersp->eers_rx_stream_npackets = 0; eersp->eers_rx_packed_stream = B_FALSE; eersp->eers_rx_packed_stream_credits = 0; #endif } #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: head/sys/dev/sfxge/common/ef10_impl.h =================================================================== --- head/sys/dev/sfxge/common/ef10_impl.h (revision 340827) +++ head/sys/dev/sfxge/common/ef10_impl.h (revision 340828) @@ -1,1219 +1,1219 @@ /*- * Copyright (c) 2015-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_EF10_IMPL_H #define _SYS_EF10_IMPL_H #ifdef __cplusplus extern "C" { #endif #if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD) #define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS) #elif EFSYS_OPT_HUNTINGTON #define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS #elif EFSYS_OPT_MEDFORD #define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS #endif /* * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could * possibly be increased, or the write size reported by newer firmware used * instead. */ #define EF10_NVRAM_CHUNK 0x80 /* Alignment requirement for value written to RX WPTR: * the WPTR must be aligned to an 8 descriptor boundary */ #define EF10_RX_WPTR_ALIGN 8 /* * Max byte offset into the packet the TCP header must start for the hardware * to be able to parse the packet correctly. 
*/ #define EF10_TCP_HEADER_OFFSET_LIMIT 208 /* Invalid RSS context handle */ #define EF10_RSS_CONTEXT_INVALID (0xffffffff) /* EV */ __checkReturn efx_rc_t ef10_ev_init( __in efx_nic_t *enp); void ef10_ev_fini( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __in efx_evq_t *eep); void ef10_ev_qdestroy( __in efx_evq_t *eep); __checkReturn efx_rc_t ef10_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); void ef10_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); __checkReturn efx_rc_t ef10_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); #if EFSYS_OPT_QSTATS void ef10_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ void ef10_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label, - __in boolean_t packed_stream); + __in efx_rxq_type_t type); void ef10_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label); /* INTR */ __checkReturn efx_rc_t ef10_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp); void ef10_intr_enable( __in efx_nic_t *enp); void ef10_intr_disable( __in efx_nic_t *enp); void ef10_intr_disable_unlocked( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_intr_trigger( __in efx_nic_t *enp, __in unsigned int level); void ef10_intr_status_line( __in efx_nic_t *enp, __out boolean_t *fatalp, __out uint32_t *qmaskp); void ef10_intr_status_message( __in efx_nic_t *enp, __in unsigned int message, __out boolean_t *fatalp); void ef10_intr_fatal( __in efx_nic_t *enp); void ef10_intr_fini( __in efx_nic_t *enp); /* NIC */ extern __checkReturn efx_rc_t ef10_nic_probe( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp); extern __checkReturn efx_rc_t ef10_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *vi_countp); extern __checkReturn efx_rc_t ef10_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nic_reset( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_nic_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t ef10_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void ef10_nic_fini( __in efx_nic_t *enp); extern void ef10_nic_unprobe( __in efx_nic_t *enp); /* MAC */ extern __checkReturn efx_rc_t ef10_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep); extern __checkReturn efx_rc_t ef10_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); extern __checkReturn efx_rc_t ef10_mac_addr_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_pdu_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_pdu_get( __in efx_nic_t *enp, __out size_t *pdu); extern __checkReturn efx_rc_t ef10_mac_reconfigure( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_multicast_list_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void ef10_mac_filter_default_rxq_clear( __in efx_nic_t *enp); #if EFSYS_OPT_LOOPBACK extern __checkReturn efx_rc_t ef10_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type); #endif /* EFSYS_OPT_LOOPBACK */ #if 
EFSYS_OPT_MAC_STATS extern __checkReturn efx_rc_t ef10_mac_stats_get_mask( __in efx_nic_t *enp, __inout_bcount(mask_size) uint32_t *maskp, __in size_t mask_size); extern __checkReturn efx_rc_t ef10_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ /* MCDI */ #if EFSYS_OPT_MCDI extern __checkReturn efx_rc_t ef10_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); extern void ef10_mcdi_fini( __in efx_nic_t *enp); extern void ef10_mcdi_send_request( __in efx_nic_t *enp, __in_bcount(hdr_len) void *hdrp, __in size_t hdr_len, __in_bcount(sdu_len) void *sdup, __in size_t sdu_len); extern __checkReturn boolean_t ef10_mcdi_poll_response( __in efx_nic_t *enp); extern void ef10_mcdi_read_response( __in efx_nic_t *enp, __out_bcount(length) void *bufferp, __in size_t offset, __in size_t length); extern efx_rc_t ef10_mcdi_poll_reboot( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mcdi_feature_supported( __in efx_nic_t *enp, __in efx_mcdi_feature_id_t id, __out boolean_t *supportedp); extern void ef10_mcdi_get_timeout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __out uint32_t *timeoutp); #endif /* EFSYS_OPT_MCDI */ /* NVRAM */ #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD extern __checkReturn efx_rc_t ef10_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_buf_write_tlv( __inout_bcount(partn_size) caddr_t partn_data, __in size_t partn_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp); extern __checkReturn efx_rc_t ef10_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_write_segment_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t all_segments); extern __checkReturn efx_rc_t ef10_nvram_partn_lock( __in efx_nic_t *enp, __in uint32_t partn); extern __checkReturn efx_rc_t ef10_nvram_partn_unlock( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *resultp); #endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ #if EFSYS_OPT_NVRAM #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t ef10_nvram_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern __checkReturn efx_rc_t ef10_nvram_type_to_partn( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *partnp); extern __checkReturn efx_rc_t ef10_nvram_partn_size( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_rw_start( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *chunk_sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_read_mode( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size, __in uint32_t mode); extern __checkReturn efx_rc_t ef10_nvram_partn_read( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t 
ef10_nvram_partn_read_backup( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_erase( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_write( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_rw_finish( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *verify_resultp); extern __checkReturn efx_rc_t ef10_nvram_partn_get_version( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t ef10_nvram_partn_set_version( __in efx_nic_t *enp, __in uint32_t partn, __in_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t ef10_nvram_buffer_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size); extern __checkReturn efx_rc_t ef10_nvram_buffer_create( __in efx_nic_t *enp, __in uint16_t partn_type, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size); extern __checkReturn efx_rc_t ef10_nvram_buffer_find_item_start( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp ); extern __checkReturn efx_rc_t ef10_nvram_buffer_find_end( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp ); extern __checkReturn __success(return != B_FALSE) boolean_t ef10_nvram_buffer_find_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp ); extern __checkReturn efx_rc_t ef10_nvram_buffer_get_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(item_max_size, *lengthp) caddr_t itemp, __in size_t item_max_size, __out uint32_t *lengthp ); extern __checkReturn efx_rc_t ef10_nvram_buffer_insert_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp ); extern __checkReturn efx_rc_t ef10_nvram_buffer_delete_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end ); extern __checkReturn efx_rc_t ef10_nvram_buffer_finish( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ); #endif /* EFSYS_OPT_NVRAM */ /* PHY */ typedef struct ef10_link_state_s { uint32_t els_adv_cap_mask; uint32_t els_lp_cap_mask; unsigned int els_fcntl; efx_link_mode_t els_link_mode; #if EFSYS_OPT_LOOPBACK efx_loopback_type_t els_loopback; #endif boolean_t els_mac_up; } ef10_link_state_t; extern void ef10_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep); extern __checkReturn efx_rc_t ef10_phy_get_link( __in efx_nic_t *enp, __out ef10_link_state_t *elsp); extern __checkReturn efx_rc_t ef10_phy_power( __in efx_nic_t *enp, __in boolean_t on); extern __checkReturn efx_rc_t ef10_phy_reconfigure( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_phy_verify( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); #if EFSYS_OPT_PHY_STATS extern __checkReturn efx_rc_t ef10_phy_stats_update( __in efx_nic_t 
*enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST extern __checkReturn efx_rc_t ef10_bist_enable_offline( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); extern __checkReturn efx_rc_t ef10_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count); extern void ef10_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ /* TX */ extern __checkReturn efx_rc_t ef10_tx_init( __in efx_nic_t *enp); extern void ef10_tx_fini( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp); extern void ef10_tx_qdestroy( __in efx_txq_t *etp); extern __checkReturn efx_rc_t ef10_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void ef10_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); #if EFSYS_OPT_RX_PACKED_STREAM extern void ef10_rx_qpush_ps_credits( __in efx_rxq_t *erp); extern __checkReturn uint8_t * ef10_rx_qps_packet_info( __in efx_rxq_t *erp, __in uint8_t *buffer, __in uint32_t buffer_length, __in uint32_t current_offset, __out uint16_t *lengthp, __out uint32_t *next_offsetp, __out uint32_t *timestamp); #endif extern __checkReturn efx_rc_t ef10_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); extern __checkReturn efx_rc_t ef10_tx_qflush( __in efx_txq_t *etp); extern void ef10_tx_qenable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t ef10_tx_qpio_enable( __in efx_txq_t *etp); extern void ef10_tx_qpio_disable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t ef10_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset); extern __checkReturn efx_rc_t ef10_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp); extern __checkReturn efx_rc_t ef10_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void ef10_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); extern void ef10_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp); extern void ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, __in int count); extern void ef10_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t vlan_tci, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS extern void ef10_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ typedef uint32_t efx_piobuf_handle_t; #define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1) extern __checkReturn efx_rc_t ef10_nic_pio_alloc( __inout efx_nic_t *enp, __out uint32_t *bufnump, 
__out efx_piobuf_handle_t *handlep, __out uint32_t *blknump, __out uint32_t *offsetp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nic_pio_free( __inout efx_nic_t *enp, __in uint32_t bufnum, __in uint32_t blknum); extern __checkReturn efx_rc_t ef10_nic_pio_link( __inout efx_nic_t *enp, __in uint32_t vi_index, __in efx_piobuf_handle_t handle); extern __checkReturn efx_rc_t ef10_nic_pio_unlink( __inout efx_nic_t *enp, __in uint32_t vi_index); /* VPD */ #if EFSYS_OPT_VPD extern __checkReturn efx_rc_t ef10_vpd_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t ef10_vpd_set( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t ef10_vpd_next( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); extern __checkReturn efx_rc_t ef10_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void ef10_vpd_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_VPD */ /* RX */ extern __checkReturn efx_rc_t ef10_rx_init( __in efx_nic_t *enp); #if EFSYS_OPT_RX_SCATTER extern __checkReturn efx_rc_t ef10_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE extern __checkReturn efx_rc_t ef10_rx_scale_context_alloc( __in efx_nic_t *enp, __in efx_rx_scale_context_type_t type, __in uint32_t num_queues, __out uint32_t *rss_contextp); extern __checkReturn efx_rc_t ef10_rx_scale_context_free( __in efx_nic_t *enp, __in uint32_t rss_context); extern __checkReturn efx_rc_t ef10_rx_scale_mode_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); extern __checkReturn efx_rc_t ef10_rx_scale_key_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n); extern __checkReturn efx_rc_t ef10_rx_scale_tbl_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n); extern __checkReturn uint32_t ef10_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer); #endif /* EFSYS_OPT_RX_SCALE */ extern __checkReturn efx_rc_t ef10_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp); extern void ef10_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added); extern void ef10_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); extern __checkReturn efx_rc_t ef10_rx_qflush( __in efx_rxq_t *erp); extern void ef10_rx_qenable( __in efx_rxq_t *erp); extern __checkReturn efx_rc_t ef10_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in 
uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp); extern void ef10_rx_qdestroy( __in efx_rxq_t *erp); extern void ef10_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_FILTER typedef struct ef10_filter_handle_s { uint32_t efh_lo; uint32_t efh_hi; } ef10_filter_handle_t; typedef struct ef10_filter_entry_s { uintptr_t efe_spec; /* pointer to filter spec plus busy bit */ ef10_filter_handle_t efe_handle; } ef10_filter_entry_t; /* * BUSY flag indicates that an update is in progress. * AUTO_OLD flag is used to mark and sweep MAC packet filters. */ #define EFX_EF10_FILTER_FLAG_BUSY 1U #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U #define EFX_EF10_FILTER_FLAGS 3U /* * Size of the hash table used by the driver. Doesn't need to be the * same size as the hardware's table. */ #define EFX_EF10_FILTER_TBL_ROWS 8192 /* Only need to allow for one directed and one unknown unicast filter */ #define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2 /* Allow for the broadcast address to be added to the multicast list */ #define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1) /* * For encapsulated packets, there is one filter each for each combination of * IPv4 or IPv6 outer frame, VXLAN, GENEVE or NVGRE packet type, and unicast or * multicast inner frames. */ #define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12 typedef struct ef10_filter_table_s { ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS]; efx_rxq_t *eft_default_rxq; boolean_t eft_using_rss; uint32_t eft_unicst_filter_indexes[ EFX_EF10_FILTER_UNICAST_FILTERS_MAX]; uint32_t eft_unicst_filter_count; uint32_t eft_mulcst_filter_indexes[ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX]; uint32_t eft_mulcst_filter_count; boolean_t eft_using_all_mulcst; uint32_t eft_encap_filter_indexes[ EFX_EF10_FILTER_ENCAP_FILTERS_MAX]; uint32_t eft_encap_filter_count; } ef10_filter_table_t; __checkReturn efx_rc_t ef10_filter_init( __in efx_nic_t *enp); void ef10_filter_fini( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_filter_restore( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace); __checkReturn efx_rc_t ef10_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t ef10_filter_supported_filters( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp); extern __checkReturn efx_rc_t ef10_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count); extern void ef10_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss); extern void ef10_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void ef10_filter_default_rxq_clear( __in efx_nic_t *enp); #endif /* EFSYS_OPT_FILTER */ extern __checkReturn efx_rc_t efx_mcdi_get_function_info( __in efx_nic_t *enp, __out uint32_t *pfp, __out_opt uint32_t *vfp); extern __checkReturn efx_rc_t efx_mcdi_privilege_mask( __in efx_nic_t *enp, __in uint32_t pf, __in uint32_t vf, __out uint32_t *maskp); extern __checkReturn efx_rc_t efx_mcdi_get_port_assignment( __in efx_nic_t *enp, __out uint32_t *portp); extern __checkReturn efx_rc_t efx_mcdi_get_port_modes( __in efx_nic_t *enp, __out uint32_t *modesp, __out_opt uint32_t *current_modep); extern __checkReturn efx_rc_t 
ef10_nic_get_port_mode_bandwidth( __in uint32_t port_mode, __out uint32_t *bandwidth_mbpsp); extern __checkReturn efx_rc_t efx_mcdi_get_mac_address_pf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]); extern __checkReturn efx_rc_t efx_mcdi_get_mac_address_vf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]); extern __checkReturn efx_rc_t efx_mcdi_get_clock( __in efx_nic_t *enp, __out uint32_t *sys_freqp, __out uint32_t *dpcpu_freqp); extern __checkReturn efx_rc_t efx_mcdi_get_vector_cfg( __in efx_nic_t *enp, __out_opt uint32_t *vec_basep, __out_opt uint32_t *pf_nvecp, __out_opt uint32_t *vf_nvecp); extern __checkReturn efx_rc_t ef10_get_datapath_caps( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_get_privilege_mask( __in efx_nic_t *enp, __out uint32_t *maskp); extern __checkReturn efx_rc_t ef10_external_port_mapping( __in efx_nic_t *enp, __in uint32_t port, __out uint8_t *external_portp); #if EFSYS_OPT_RX_PACKED_STREAM /* Data space per credit in packed stream mode */ #define EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (1 << 16) /* * Received packets are always aligned at this boundary. Also there always * exists a gap of this size between packets. * (see SF-112241-TC, 4.5) */ #define EFX_RX_PACKED_STREAM_ALIGNMENT 64 /* * Size of a pseudo-header prepended to received packets * in packed stream mode */ #define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8 /* Minimum space for packet in packed stream mode */ #define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \ P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \ EFX_MAC_PDU_MIN + \ EFX_RX_PACKED_STREAM_ALIGNMENT, \ EFX_RX_PACKED_STREAM_ALIGNMENT) /* Maximum number of credits */ #define EFX_RX_PACKED_STREAM_MAX_CREDITS 127 #endif /* EFSYS_OPT_RX_PACKED_STREAM */ #ifdef __cplusplus } #endif #endif /* _SYS_EF10_IMPL_H */ Index: head/sys/dev/sfxge/common/ef10_rx.c =================================================================== --- head/sys/dev/sfxge/common/ef10_rx.c (revision 340827) +++ head/sys/dev/sfxge/common/ef10_rx.c (revision 340828) @@ -1,1050 +1,1050 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static __checkReturn efx_rc_t efx_mcdi_init_rxq( __in efx_nic_t *enp, __in uint32_t size, __in uint32_t target_evq, __in uint32_t label, __in uint32_t instance, __in efsys_mem_t *esmp, __in boolean_t disable_scatter, __in uint32_t ps_bufsize) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN, MC_CMD_INIT_RXQ_EXT_OUT_LEN)]; int npages = EFX_RXQ_NBUFS(size); int i; efx_qword_t *dma_addr; uint64_t addr; efx_rc_t rc; uint32_t dma_mode; /* If this changes, then the payload size might need to change. */ EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0); EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS); if (ps_bufsize > 0) dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; else dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_RXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, size); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance); MCDI_IN_POPULATE_DWORD_8(req, INIT_RXQ_EXT_IN_FLAGS, INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0, INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0, INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0, INIT_RXQ_EXT_IN_CRC_MODE, 0, INIT_RXQ_EXT_IN_FLAG_PREFIX, 1, INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter, INIT_RXQ_EXT_IN_DMA_MODE, dma_mode, INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fini_rxq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN, MC_CMD_FINI_RXQ_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_RXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the RXQ has already been destroyed. 
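 * Callers can therefore treat EALREADY as the flush having already completed.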
*/ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t efx_mcdi_rss_context_alloc( __in efx_nic_t *enp, __in efx_rx_scale_context_type_t type, __in uint32_t num_queues, __out uint32_t *rss_contextp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)]; uint32_t rss_context; uint32_t context_type; efx_rc_t rc; if (num_queues > EFX_MAXRSS) { rc = EINVAL; goto fail1; } switch (type) { case EFX_RX_SCALE_EXCLUSIVE: context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE; break; case EFX_RX_SCALE_SHARED: context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; break; default: rc = EINVAL; goto fail2; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type); /* NUM_QUEUES is only used to validate indirection table offsets */ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) { rc = EMSGSIZE; goto fail4; } rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = ENOENT; goto fail5; } *rss_contextp = rss_context; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE static efx_rc_t efx_mcdi_rss_context_free( __in efx_nic_t *enp, __in uint32_t rss_context) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN, MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)]; efx_rc_t rc; if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE static efx_rc_t efx_mcdi_rss_context_set_flags( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_type_t type) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN, MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)]; efx_rc_t rc; if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, rss_context); MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, (type & EFX_RX_HASH_IPV4) ? 
1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, (type & EFX_RX_HASH_IPV6) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE static efx_rc_t efx_mcdi_rss_context_set_key( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN, MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)]; efx_rc_t rc; if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, rss_context); EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) { rc = EINVAL; goto fail2; } memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY), key, n); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE static efx_rc_t efx_mcdi_rss_context_set_table( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN, MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)]; uint8_t *req_table; int i, rc; if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, rss_context); req_table = MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE); for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++) { req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ __checkReturn efx_rc_t ef10_rx_init( __in efx_nic_t *enp) { #if EFSYS_OPT_RX_SCALE if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS, &enp->en_rss_context) == 0) { /* * Allocated an exclusive RSS context, which allows both the * indirection table and key to be modified. */ enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE; enp->en_hash_support = EFX_RX_HASH_AVAILABLE; } else { /* * Failed to allocate an exclusive RSS context. Continue * operation without support for RSS. The pseudo-header in * received packets will not contain a Toeplitz hash value. 
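 * Recording EFX_RX_SCALE_UNAVAILABLE here makes subsequent
 * ef10_rx_scale_mode_set()/key_set()/tbl_set() calls on the default
 * context fail with ENOTSUP instead of using an invalid context.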
*/ enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE; enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE; } #endif /* EFSYS_OPT_RX_SCALE */ return (0); } #if EFSYS_OPT_RX_SCATTER __checkReturn efx_rc_t ef10_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { _NOTE(ARGUNUSED(enp, buf_size)) return (0); } #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_context_alloc( __in efx_nic_t *enp, __in efx_rx_scale_context_type_t type, __in uint32_t num_queues, __out uint32_t *rss_contextp) { efx_rc_t rc; rc = efx_mcdi_rss_context_alloc(enp, type, num_queues, rss_contextp); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_context_free( __in efx_nic_t *enp, __in uint32_t rss_context) { efx_rc_t rc; rc = efx_mcdi_rss_context_free(enp, rss_context); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_mode_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { efx_rc_t rc; EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ); EFSYS_ASSERT3U(insert, ==, B_TRUE); if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) { rc = EINVAL; goto fail1; } if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail2; } rss_context = enp->en_rss_context; } if ((rc = efx_mcdi_rss_context_set_flags(enp, rss_context, type)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_key_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n) { efx_rc_t rc; EFX_STATIC_ASSERT(EFX_RSS_KEY_SIZE == MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail1; } rss_context = enp->en_rss_context; } if ((rc = efx_mcdi_rss_context_set_key(enp, rss_context, key, n)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_tbl_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n) { efx_rc_t rc; if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail1; } rss_context = enp->en_rss_context; } if ((rc = efx_mcdi_rss_context_set_table(enp, rss_context, table, n)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ /* * EF10 RX pseudo-header * --------------------- * * Receive packets are prefixed by an (optional) 14 byte pseudo-header: * * +00: Toeplitz hash value. * (32bit little-endian) * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag. * (16bit big-endian) * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag. * (16bit big-endian) * +08: Packet Length. Zero if the RX datapath was in cut-through mode. 
* (16bit little-endian) * +10: MAC timestamp. Zero if timestamping is not enabled. * (32bit little-endian) * * See "The RX Pseudo-header" in SF-109306-TC. */ __checkReturn efx_rc_t ef10_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp) { _NOTE(ARGUNUSED(enp)) /* * The RX pseudo-header contains the packet length, excluding the * pseudo-header. If the hardware receive datapath was operating in * cut-through mode then the length in the RX pseudo-header will be * zero, and the packet length must be obtained from the DMA length * reported in the RX event. */ *lengthp = buffer[8] | (buffer[9] << 8); return (0); } #if EFSYS_OPT_RX_SCALE __checkReturn uint32_t ef10_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer) { _NOTE(ARGUNUSED(enp)) switch (func) { case EFX_RX_HASHALG_TOEPLITZ: return (buffer[0] | (buffer[1] << 8) | (buffer[2] << 16) | (buffer[3] << 24)); default: EFSYS_ASSERT(0); return (0); } } #endif /* EFSYS_OPT_RX_SCALE */ void ef10_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added) { efx_qword_t qword; unsigned int i; unsigned int offset; unsigned int id; /* The client driver must not overfill the queue */ EFSYS_ASSERT3U(added - completed + n, <=, EFX_RXQ_LIMIT(erp->er_mask + 1)); id = added & (erp->er_mask); for (i = 0; i < n; i++) { EFSYS_PROBE4(rx_post, unsigned int, erp->er_index, unsigned int, id, efsys_dma_addr_t, addrp[i], size_t, size); EFX_POPULATE_QWORD_3(qword, ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size), ESF_DZ_RX_KER_BUF_ADDR_DW0, (uint32_t)(addrp[i] & 0xffffffff), ESF_DZ_RX_KER_BUF_ADDR_DW1, (uint32_t)(addrp[i] >> 32)); offset = id * sizeof (efx_qword_t); EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword); id = (id + 1) & (erp->er_mask); } } void ef10_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; unsigned int pushed = *pushedp; uint32_t wptr; efx_dword_t dword; /* Hardware has alignment restriction for WPTR */ wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN); if (pushed == wptr) return; *pushedp = wptr; /* Push the populated descriptors out */ wptr &= erp->er_mask; EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, wptr, pushed & erp->er_mask); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, erp->er_index, &dword, B_FALSE); } #if EFSYS_OPT_RX_PACKED_STREAM void ef10_rx_qpush_ps_credits( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_dword_t dword; efx_evq_rxq_state_t *rxq_state = &erp->er_eep->ee_rxq_state[erp->er_label]; uint32_t credits; EFSYS_ASSERT(rxq_state->eers_rx_packed_stream); if (rxq_state->eers_rx_packed_stream_credits == 0) return; /* * It is a bug if we think that FW has utilized more * credits than it is allowed to have (maximum). However, * make sure that we do not credit more than maximum anyway. 
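 * (Each credit covers EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (64KB) of
 * buffer space; ef10_rx_qps_packet_info() below accrues one credit each
 * time parsing crosses such a boundary.)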
*/ credits = MIN(rxq_state->eers_rx_packed_stream_credits, EFX_RX_PACKED_STREAM_MAX_CREDITS); EFX_POPULATE_DWORD_3(dword, ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1, ERF_DZ_RX_DESC_MAGIC_CMD, ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS, ERF_DZ_RX_DESC_MAGIC_DATA, credits); EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, erp->er_index, &dword, B_FALSE); rxq_state->eers_rx_packed_stream_credits = 0; } /* * In accordance with SF-112241-TC, the received data has the following layout: * - 8 byte pseudo-header which consists of: * - 4 byte little-endian timestamp * - 2 byte little-endian captured length in bytes * - 2 byte little-endian original packet length in bytes * - captured packet bytes * - optional padding to align to a 64-byte boundary * - 64 bytes of scratch space for the host software */ __checkReturn uint8_t * ef10_rx_qps_packet_info( __in efx_rxq_t *erp, __in uint8_t *buffer, __in uint32_t buffer_length, __in uint32_t current_offset, __out uint16_t *lengthp, __out uint32_t *next_offsetp, __out uint32_t *timestamp) { uint16_t buf_len; uint8_t *pkt_start; efx_qword_t *qwordp; efx_evq_rxq_state_t *rxq_state = &erp->er_eep->ee_rxq_state[erp->er_label]; EFSYS_ASSERT(rxq_state->eers_rx_packed_stream); buffer += current_offset; pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE; qwordp = (efx_qword_t *)buffer; *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP); *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN); buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN); buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE, EFX_RX_PACKED_STREAM_ALIGNMENT); *next_offsetp = current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT; EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length); EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp); if ((*next_offsetp ^ current_offset) & EFX_RX_PACKED_STREAM_MEM_PER_CREDIT) rxq_state->eers_rx_packed_stream_credits++; return (pkt_start); } #endif __checkReturn efx_rc_t ef10_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_rc_t rc; if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0) goto fail1; return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the RXQ has already been destroyed. Callers need to know that * the RXQ flush has completed to avoid waiting until timeout for a * flush done event that will not be delivered.
*/ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_rx_qenable( __in efx_rxq_t *erp) { /* FIXME */ _NOTE(ARGUNUSED(erp)) /* FIXME */ } __checkReturn efx_rc_t ef10_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_rc_t rc; boolean_t disable_scatter; unsigned int ps_buf_size; _NOTE(ARGUNUSED(id, erp)) EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH)); EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS); EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS)); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS)); if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_rxq_limit) { rc = EINVAL; goto fail2; } switch (type) { case EFX_RXQ_TYPE_DEFAULT: case EFX_RXQ_TYPE_SCATTER: ps_buf_size = 0; break; #if EFSYS_OPT_RX_PACKED_STREAM case EFX_RXQ_TYPE_PACKED_STREAM_1M: ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M; break; case EFX_RXQ_TYPE_PACKED_STREAM_512K: ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K; break; case EFX_RXQ_TYPE_PACKED_STREAM_256K: ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K; break; case EFX_RXQ_TYPE_PACKED_STREAM_128K: ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K; break; case EFX_RXQ_TYPE_PACKED_STREAM_64K: ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K; break; #endif /* EFSYS_OPT_RX_PACKED_STREAM */ default: rc = ENOTSUP; goto fail3; } #if EFSYS_OPT_RX_PACKED_STREAM if (ps_buf_size != 0) { /* Check if datapath firmware supports packed stream mode */ if (encp->enc_rx_packed_stream_supported == B_FALSE) { rc = ENOTSUP; goto fail4; } /* Check if packed stream allows configurable buffer sizes */ if ((type != EFX_RXQ_TYPE_PACKED_STREAM_1M) && (encp->enc_rx_var_packed_stream_supported == B_FALSE)) { rc = ENOTSUP; goto fail5; } } #else /* EFSYS_OPT_RX_PACKED_STREAM */ EFSYS_ASSERT(ps_buf_size == 0); #endif /* EFSYS_OPT_RX_PACKED_STREAM */ /* Scatter can only be disabled if the firmware supports doing so */ if (type == EFX_RXQ_TYPE_SCATTER) disable_scatter = B_FALSE; else disable_scatter = encp->enc_rx_disable_scatter_supported; if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index, esmp, disable_scatter, ps_buf_size)) != 0) goto fail6; erp->er_eep = eep; erp->er_label = label; - ef10_ev_rxlabel_init(eep, erp, label, ps_buf_size != 0); + ef10_ev_rxlabel_init(eep, erp, label, type); return (0); fail6: EFSYS_PROBE(fail6); #if EFSYS_OPT_RX_PACKED_STREAM fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); #endif /* EFSYS_OPT_RX_PACKED_STREAM */ fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_rx_qdestroy( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_evq_t *eep = erp->er_eep; unsigned int label = erp->er_label; ef10_ev_rxlabel_fini(eep, label); EFSYS_ASSERT(enp->en_rx_qcount != 0); --enp->en_rx_qcount; EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); } void ef10_rx_fini( __in efx_nic_t *enp) { #if EFSYS_OPT_RX_SCALE if (enp->en_rss_context_type != EFX_RX_SCALE_UNAVAILABLE) (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context); enp->en_rss_context = 0; enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE; #else _NOTE(ARGUNUSED(enp)) #endif /* EFSYS_OPT_RX_SCALE */ } #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
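Two illustrative sketches follow. They are editorial additions for readers of this change, not part of the committed sources, and the names introduced here (rx_prefix_fields, rx_prefix_decode, example_ps_drain, n_packets) are hypothetical.

First, a minimal, self-contained decoding of the 14-byte RX pseudo-header documented above ef10_rx_prefix_pktlen(). The common code only exposes the hash and length fields (via ef10_rx_prefix_hash() and ef10_rx_prefix_pktlen()); this sketch applies the same byte-order rules to all five fields.

#include <stdint.h>

struct rx_prefix_fields {
	uint32_t hash;		/* Toeplitz hash; valid only if RSS is available */
	uint16_t vlan_outer;	/* outer VLAN tag, zero if absent */
	uint16_t vlan_inner;	/* inner VLAN tag, zero if absent */
	uint16_t pkt_len;	/* zero if the RX datapath was in cut-through mode */
	uint32_t timestamp;	/* zero if timestamping is not enabled */
};

static void
rx_prefix_decode(const uint8_t *p, struct rx_prefix_fields *fp)
{
	/* +00: Toeplitz hash, 32-bit little-endian */
	fp->hash = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	    ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	/* +04 and +06: VLAN tags, 16-bit big-endian */
	fp->vlan_outer = (uint16_t)((p[4] << 8) | p[5]);
	fp->vlan_inner = (uint16_t)((p[6] << 8) | p[7]);
	/* +08: packet length excluding the pseudo-header, 16-bit little-endian */
	fp->pkt_len = (uint16_t)(p[8] | (p[9] << 8));
	/* +10: MAC timestamp, 32-bit little-endian */
	fp->timestamp = (uint32_t)p[10] | ((uint32_t)p[11] << 8) |
	    ((uint32_t)p[12] << 16) | ((uint32_t)p[13] << 24);
}

Second, a sketch of how a packed-stream consumer might walk the packets laid out in one large buffer using ef10_rx_qps_packet_info(), then return the accrued credits with ef10_rx_qpush_ps_credits(). How the buffer and the packet count are obtained (from the corresponding RX event) is outside the scope of the sketch.

static void
example_ps_drain(efx_rxq_t *erp, uint8_t *buffer, uint32_t buffer_length,
    unsigned int n_packets)
{
	uint32_t offset = 0;
	uint32_t next_offset;
	uint32_t timestamp;
	uint16_t length;
	uint8_t *pkt;
	unsigned int i;

	for (i = 0; i < n_packets; i++) {
		/* Decode one pseudo-header and locate the packet bytes. */
		pkt = ef10_rx_qps_packet_info(erp, buffer, buffer_length,
		    offset, &length, &next_offset, &timestamp);

		/* ... hand off "length" bytes starting at "pkt" ... */

		/* Advance past the packet and its alignment gap. */
		offset = next_offset;
	}

	/* Give back any credits accrued while crossing 64KB boundaries. */
	ef10_rx_qpush_ps_credits(erp);
}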