Index: head/sys/dev/sfxge/common/ef10_ev.c
===================================================================
--- head/sys/dev/sfxge/common/ef10_ev.c	(revision 310692)
+++ head/sys/dev/sfxge/common/ef10_ev.c	(revision 310693)
@@ -1,1218 +1,1218 @@
/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	/*
	 * On Huntington RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut through is enabled no RX batching will
	 * occur.
	 *
	 * So always enable RX and TX event batching, and enable event cut
	 * through if we want low latency operation.
	 */
	ev_cut_through = low_latency ?
1 : 0; MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS, INIT_EVQ_IN_FLAG_INTERRUPTING, 1, INIT_EVQ_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_IN_FLAG_INT_ARMD, 0, INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through, INIT_EVQ_IN_FLAG_RX_MERGE, 1, INIT_EVQ_IN_FLAG_TX_MERGE, 1); /* If the value is zero then disable the timer */ if (us == 0) { MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail2; MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks); } MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE, MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { rc = EMSGSIZE; goto fail4; } /* NOTE: ignore the returned IRQ param as firmware does not set it. */ return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_init_evq_v2( __in efx_nic_t *enp, __in unsigned int instance, __in efsys_mem_t *esmp, __in size_t nevs, __in uint32_t irq, __in uint32_t us) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), MC_CMD_INIT_EVQ_V2_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; efx_rc_t rc; npages = EFX_EVQ_NBUFS(nevs); if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS, INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1, INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, INIT_EVQ_V2_IN_FLAG_TYPE, MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO); /* If the value is zero then disable the timer */ if (us == 0) { MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail2; MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); } MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { 
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail4;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
	    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	irq = index;	/* INIT_EVQ expects function-relative vector number */

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */
	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable RX
		 * and event cut through and to disable RX batching. We let the
		 * firmware decide the settings to use. If the adapter has a
		 * low latency license, it will choose the best settings for
		 * low latency, otherwise it chooses the best settings for
		 * throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use. We
		 * favour latency if the adapter is running low-latency
		 * firmware and throughput otherwise, and assume that a lack
		 * of support for RX batching implies the adapter is running
		 * low-latency firmware. (This is how it's been done since
		 * Huntington GA. It doesn't make much sense with hindsight as
		 * the 'low-latency' firmware variant is also best for
		 * throughput, and does now support RX batching).
		 */
		boolean_t low_latency = encp->enc_rx_batching_enabled ?
0 : 1; rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, low_latency); if (rc != 0) goto fail5; } return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index); } __checkReturn efx_rc_t ef10_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; rptr = count & eep->ee_mask; if (enp->en_nic_cfg.enc_bug35388_workaround) { EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS > (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS < (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, ERF_DD_EVQ_IND_RPTR, (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, ERF_DD_EVQ_IND_RPTR, rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); } else { EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); } return (0); } static __checkReturn efx_rc_t efx_mcdi_driver_event( __in efx_nic_t *enp, __in uint32_t evq, __in efx_qword_t data) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN, MC_CMD_DRIVER_EVENT_OUT_LEN)]; efx_rc_t rc; req.emr_cmd = MC_CMD_DRIVER_EVENT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN; MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO, EFX_QWORD_FIELD(data, EFX_DWORD_0)); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI, EFX_QWORD_FIELD(data, EFX_DWORD_1)); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t event; EFX_POPULATE_QWORD_3(event, ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV, ESF_DZ_DRV_SUB_CODE, 0, ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data); (void) efx_mcdi_driver_event(enp, eep->ee_index, event); } __checkReturn efx_rc_t ef10_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_dword_t dword; uint32_t mode; efx_rc_t rc; /* Check that hardware and MCDI use the same timer MODE values */ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF); if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { mode = FFE_CZ_TIMER_MODE_DIS; } else { mode = FFE_CZ_TIMER_MODE_INT_HLDOFF; } if (encp->enc_bug61265_workaround) { uint32_t ns = us * 1000; rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, 
		    ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			EFX_POPULATE_DWORD_2(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington. Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	eersp = &eep->ee_rxq_state[label];
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
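	 *
	 * For illustration only: the desc_count computation above relies on
	 * masked modular arithmetic, so the subtraction stays correct when
	 * the hardware's truncated pointer wraps. A minimal user-space
	 * sketch of the same pattern (names and mask width here are
	 * hypothetical, not driver API):
	 *
	 *	#include <stdint.h>
	 *	#include <assert.h>
	 *
	 *	#define PTR_LBITS_MASK 0xfu	// low 4 bits of the hw pointer
	 *
	 *	static unsigned int
	 *	descs_consumed(uint32_t hw_lbits, uint32_t sw_read_ptr)
	 *	{
	 *		// Masking the difference handles wrap-around of the
	 *		// truncated hardware pointer.
	 *		return ((hw_lbits - sw_read_ptr) & PTR_LBITS_MASK);
	 *	}
	 *
	 *	int
	 *	main(void)
	 *	{
	 *		// 0xe -> 0x2 wraps past 0xf; 4 descriptors consumed.
	 *		assert(descs_consumed(0x2, 0xe) == 4);
	 *		return (0);
	 *	}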
*/ if (desc_count > 1) { EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); flags |= EFX_PKT_PREFIX_LEN; } /* Calculate the index of the last descriptor consumed */ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { /* RX frame truncated (error flag is misnamed) */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { /* Bad Ethernet frame CRC */ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { /* * Hardware parse failed, due to malformed headers * or headers that are too long for the parser. * Headers and checksums must be validated by the host. */ /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */ goto deliver; } if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) || (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) { flags |= EFX_PKT_VLAN_TAGGED; } switch (l3_class) { case ESE_DZ_L3_CLASS_IP4: case ESE_DZ_L3_CLASS_IP4_FRAG: flags |= EFX_PKT_IPV4; if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) { EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); } else { flags |= EFX_CKSUM_IPV4; } if (l4_class == ESE_DZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_TCP; } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_UDP; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); } break; case ESE_DZ_L3_CLASS_IP6: case ESE_DZ_L3_CLASS_IP6_FRAG: flags |= EFX_PKT_IPV6; if (l4_class == ESE_DZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_TCP; } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_UDP; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); } break; default: EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); break; } if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) { if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); } else { flags |= EFX_CKSUM_TCPUDP; } } deliver: /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags); return (should_abort); } static __checkReturn boolean_t ef10_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } static __checkReturn boolean_t ef10_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { unsigned int code; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE); switch (code) { case 
ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}
	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}
	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;
	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
-	unsigned code;
+	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_SENSOREVT,
			    MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later.
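		 *
		 * For illustration: EFX_QWORD_FIELD()/MCDI_EV_FIELD()
		 * conceptually extract a bitfield from the 64-bit event word
		 * by shift and mask. A standalone sketch with a made-up
		 * field layout (LBN 60, width 4, not the real MCDI_EVENT_CODE
		 * position):
		 *
		 *	#include <stdint.h>
		 *	#include <assert.h>
		 *
		 *	#define FIELD_LBN	60u	// hypothetical low bit
		 *	#define FIELD_WIDTH	4u	// hypothetical width
		 *
		 *	static uint32_t
		 *	ev_field(uint64_t ev)
		 *	{
		 *		uint64_t mask =
		 *		    ((uint64_t)1 << FIELD_WIDTH) - 1;
		 *
		 *		return ((uint32_t)((ev >> FIELD_LBN) & mask));
		 *	}
		 *
		 *	int
		 *	main(void)
		 *	{
		 *		assert(ev_field((uint64_t)0xc << 60) == 0xc);
		 *		return (0);
		 *	}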
*/ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif break; case MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } case MCDI_EVENT_CODE_TX_ERR: { /* * After a TXQ error is detected, firmware sends a TX_ERR event. * This may be followed by TX completions (which we discard), * and then finally by a TX_FLUSH event. Firmware destroys the * TXQ automatically after sending the TX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_TXQ_ERR; EFSYS_PROBE2(tx_descq_err, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR, MCDI_EV_FIELD(eqp, TX_ERR_DATA)); break; } case MCDI_EVENT_CODE_TX_FLUSH: { uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ); /* * EF10 firmware sends two TX_FLUSH events: one to the txq's * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with TX_FLUSH_TO_DRIVER. */ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case MCDI_EVENT_CODE_RX_ERR: { /* * After an RXQ error is detected, firmware sends an RX_ERR * event. This may be followed by RX events (which we discard), * and then finally by an RX_FLUSH event. Firmware destroys the * RXQ automatically after sending the RX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_RXQ_ERR; EFSYS_PROBE2(rx_descq_err, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR, MCDI_EV_FIELD(eqp, RX_ERR_DATA)); break; } case MCDI_EVENT_CODE_RX_FLUSH: { uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ); /* * EF10 firmware sends two RX_FLUSH events: one to the rxq's * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with RX_FLUSH_TO_DRIVER. 
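		 *
		 * A minimal sketch of this dedup rule for illustration
		 * (types and names invented for the example, not driver
		 * API): each queue's flush is counted once, and the
		 * duplicate copy carrying the to-driver flag is skipped.
		 *
		 *	#include <stdbool.h>
		 *	#include <assert.h>
		 *
		 *	struct flush_ev {
		 *		unsigned int qid;
		 *		bool to_driver;	// duplicate copy on evq 0
		 *	};
		 *
		 *	static unsigned int
		 *	count_flushes(const struct flush_ev *evs,
		 *	    unsigned int n)
		 *	{
		 *		unsigned int i, done = 0;
		 *
		 *		for (i = 0; i < n; i++) {
		 *			if (evs[i].to_driver)
		 *				continue;	// skip duplicate
		 *			done++;
		 *		}
		 *		return (done);
		 *	}
		 *
		 *	int
		 *	main(void)
		 *	{
		 *		struct flush_ev evs[] = {
		 *			{ 0, false }, { 0, true },
		 *			{ 1, false }, { 1, true },
		 *		};
		 *		assert(count_flushes(evs, 4) == 2);
		 *		return (0);
		 *	}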
*/ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); break; } default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } void ef10_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label) { efx_evq_rxq_state_t *eersp; EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = erp->er_mask; } void ef10_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label) { efx_evq_rxq_state_t *eersp; EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0); eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = 0; } #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: head/sys/dev/sfxge/common/ef10_filter.c =================================================================== --- head/sys/dev/sfxge/common/ef10_filter.c (revision 310692) +++ head/sys/dev/sfxge/common/ef10_filter.c (revision 310693) @@ -1,1482 +1,1482 @@ /*- * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_FILTER

#define	EFE_SPEC(eftp, index)	((eftp)->eft_entry[(index)].efe_spec)

static			efx_filter_spec_t *
ef10_filter_entry_spec(
	__in		const ef10_filter_table_t *eftp,
	__in		unsigned int index)
{
	return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) &
		~(uintptr_t)EFX_EF10_FILTER_FLAGS));
}

static			boolean_t
ef10_filter_entry_is_busy(
	__in		const ef10_filter_table_t *eftp,
	__in		unsigned int index)
{
	if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY)
		return (B_TRUE);
	else
		return (B_FALSE);
}

static			boolean_t
ef10_filter_entry_is_auto_old(
	__in		const ef10_filter_table_t *eftp,
	__in		unsigned int index)
{
	if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD)
		return (B_TRUE);
	else
		return (B_FALSE);
}

static			void
ef10_filter_set_entry(
	__inout		ef10_filter_table_t *eftp,
	__in		unsigned int index,
	__in_opt	const efx_filter_spec_t *efsp)
{
	EFE_SPEC(eftp, index) = (uintptr_t)efsp;
}

static			void
ef10_filter_set_entry_busy(
	__inout		ef10_filter_table_t *eftp,
	__in		unsigned int index)
{
	EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
}

static			void
ef10_filter_set_entry_not_busy(
	__inout		ef10_filter_table_t *eftp,
	__in		unsigned int index)
{
	EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
}

static			void
ef10_filter_set_entry_auto_old(
	__inout		ef10_filter_table_t *eftp,
	__in		unsigned int index)
{
	EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
	EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
}

static			void
ef10_filter_set_entry_not_auto_old(
	__inout		ef10_filter_table_t *eftp,
	__in		unsigned int index)
{
	EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
	EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
}

	__checkReturn	efx_rc_t
ef10_filter_init(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;
	ef10_filter_table_t *eftp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

#define	MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));
#undef MATCH_MASK

	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);

	if (!eftp) {
		rc = ENOMEM;
		goto fail1;
	}

	enp->en_filter.ef_ef10_filter_table = eftp;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_filter_fini(
	__in		efx_nic_t *enp)
{
	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

	if (enp->en_filter.ef_ef10_filter_table != NULL) {
		EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
enp->en_filter.ef_ef10_filter_table); } } static __checkReturn efx_rc_t efx_mcdi_filter_op_add( __in efx_nic_t *enp, __in efx_filter_spec_t *spec, __in unsigned int filter_op, __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN, MC_CMD_FILTER_OP_OUT_LEN)]; uint32_t match_fields = 0; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REPLACE: MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi); /* Fall through */ case MC_CMD_FILTER_OP_IN_OP_INSERT: case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } if (spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { /* * The LOC_MAC_IG match flag can represent unknown unicast * or multicast filters - use the MAC address to distinguish * them. */ if (EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac)) match_fields |= 1U << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN; else match_fields |= 1U << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; } match_fields |= spec->efs_match_flags & (~EFX_FILTER_MATCH_LOC_MAC_IG); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS, match_fields); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST, MC_CMD_FILTER_OP_IN_RX_DEST_HOST); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE, spec->efs_dmaq_id); if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) { MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT, spec->efs_rss_context); } MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE, spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ? MC_CMD_FILTER_OP_IN_RX_MODE_RSS : MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST, MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) { /* * NOTE: Unlike most MCDI requests, the filter fields * are presented in network (big endian) byte order. 
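	 *
	 * For illustration: __CPU_TO_BE_16() produces the network (big
	 * endian) representation of a 16-bit value regardless of host byte
	 * order. A portable standalone sketch of the idea (example code,
	 * not the driver's macro):
	 *
	 *	#include <stdint.h>
	 *	#include <assert.h>
	 *
	 *	// Store a 16-bit value into an MCDI-style byte buffer in
	 *	// network (big endian) order, independent of host endianness.
	 *	static void
	 *	put_be16(uint8_t *dst, uint16_t v)
	 *	{
	 *		dst[0] = (uint8_t)(v >> 8);
	 *		dst[1] = (uint8_t)(v & 0xff);
	 *	}
	 *
	 *	int
	 *	main(void)
	 *	{
	 *		uint8_t buf[2];
	 *
	 *		put_be16(buf, 0x1234);
	 *		assert(buf[0] == 0x12 && buf[1] == 0x34);
	 *		return (0);
	 *	}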
*/ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC), spec->efs_rem_mac, EFX_MAC_ADDR_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC), spec->efs_loc_mac, EFX_MAC_ADDR_LEN); MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT, __CPU_TO_BE_16(spec->efs_rem_port)); MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT, __CPU_TO_BE_16(spec->efs_loc_port)); MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE, __CPU_TO_BE_16(spec->efs_ether_type)); MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN, __CPU_TO_BE_16(spec->efs_inner_vid)); MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN, __CPU_TO_BE_16(spec->efs_outer_vid)); /* IP protocol (in low byte, high byte is zero) */ MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO, spec->efs_ip_proto); EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) == MC_CMD_FILTER_OP_IN_SRC_IP_LEN); EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) == MC_CMD_FILTER_OP_IN_DST_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP), &spec->efs_rem_host.eo_byte[0], MC_CMD_FILTER_OP_IN_SRC_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP), &spec->efs_loc_host.eo_byte[0], MC_CMD_FILTER_OP_IN_DST_IP_LEN); } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) { rc = EMSGSIZE; goto fail3; } handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO); handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_filter_op_delete( __in efx_nic_t *enp, __in unsigned int filter_op, __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN, MC_CMD_FILTER_OP_OUT_LEN)]; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REMOVE: MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_REMOVE); break; case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) { rc = EMSGSIZE; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn boolean_t ef10_filter_equal( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { /* FIXME: Consider rx vs tx filters (look at efs_flags) */ if (left->efs_match_flags != right->efs_match_flags) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host)) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host)) return (B_FALSE); if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (left->efs_rem_port != right->efs_rem_port) return (B_FALSE); if (left->efs_loc_port != right->efs_loc_port) return (B_FALSE); if (left->efs_inner_vid != right->efs_inner_vid) return (B_FALSE); if 
(left->efs_outer_vid != right->efs_outer_vid) return (B_FALSE); if (left->efs_ether_type != right->efs_ether_type) return (B_FALSE); if (left->efs_ip_proto != right->efs_ip_proto) return (B_FALSE); return (B_TRUE); } static __checkReturn boolean_t ef10_filter_same_dest( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) && (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_rss_context == right->efs_rss_context) return (B_TRUE); } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) && (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_dmaq_id == right->efs_dmaq_id) return (B_TRUE); } return (B_FALSE); } static __checkReturn uint32_t ef10_filter_hash( __in efx_filter_spec_t *spec) { EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t)) == 0); EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) % sizeof (uint32_t)) == 0); /* * As the area of the efx_filter_spec_t we need to hash is DWORD * aligned and an exact number of DWORDs in size we can use the * optimised efx_hash_dwords() rather than efx_hash_bytes() */ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid, (sizeof (efx_filter_spec_t) - EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) / sizeof (uint32_t), 0)); } /* * Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. Currently we decide that * filters for specific local unicast MAC and IP addresses are * exclusive. */ static __checkReturn boolean_t ef10_filter_is_exclusive( __in efx_filter_spec_t *spec) { if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) && !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac)) return (B_TRUE); if ((spec->efs_match_flags & (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) && ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe)) return (B_TRUE); if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) && (spec->efs_loc_host.eo_u8[0] != 0xff)) return (B_TRUE); } return (B_FALSE); } __checkReturn efx_rc_t ef10_filter_restore( __in efx_nic_t *enp) { int tbl_id; efx_filter_spec_t *spec; ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; boolean_t restoring; int state; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) { EFSYS_LOCK(enp->en_eslp, state); spec = ef10_filter_entry_spec(eftp, tbl_id); if (spec == NULL) { restoring = B_FALSE; } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) { /* Ignore busy entries. */ restoring = B_FALSE; } else { ef10_filter_set_entry_busy(eftp, tbl_id); restoring = B_TRUE; } EFSYS_UNLOCK(enp->en_eslp, state); if (restoring == B_FALSE) continue; if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &eftp->eft_entry[tbl_id].efe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &eftp->eft_entry[tbl_id].efe_handle); } if (rc != 0) goto fail1; EFSYS_LOCK(enp->en_eslp, state); ef10_filter_set_entry_not_busy(eftp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * An arbitrary search limit for the software hash table. As per the linux net * driver. 
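 *
 * For illustration, the same bounded open-addressing search can be written
 * standalone (table size, limit and key type here are made up, and the table
 * is a plain array rather than the driver's spec pointers):
 *
 *	#include <stdint.h>
 *	#include <assert.h>
 *
 *	#define TBL_ROWS	256u	// must be a power of two
 *	#define SEARCH_LIMIT	8u	// bounded probe depth
 *
 *	// Returns the slot holding key, or -1 if not found within
 *	// SEARCH_LIMIT probes of its hash bucket.
 *	static int
 *	probe(const uint32_t tbl[TBL_ROWS], uint32_t hash, uint32_t key)
 *	{
 *		unsigned int depth;
 *
 *		for (depth = 1; depth <= SEARCH_LIMIT; depth++) {
 *			unsigned int i = (hash + depth) & (TBL_ROWS - 1);
 *
 *			if (tbl[i] == key)
 *				return ((int)i);
 *		}
 *		return (-1);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t tbl[TBL_ROWS] = { 0 };
 *
 *		tbl[(42 + 1) & (TBL_ROWS - 1)] = 7;	// first probe slot
 *		assert(probe(tbl, 42, 7) == 43);
 *		return (0);
 *	}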
 */
#define	EF10_FILTER_SEARCH_LIMIT 200

static	__checkReturn	efx_rc_t
ef10_filter_add_internal(
	__in		efx_nic_t *enp,
	__inout		efx_filter_spec_t *spec,
	__in		boolean_t may_replace,
	__out_opt	uint32_t *filter_id)
{
	efx_rc_t rc;
	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
	efx_filter_spec_t *saved_spec;
	uint32_t hash;
	unsigned int depth;
	int ins_index;
	boolean_t replacing = B_FALSE;
	unsigned int i;
	int state;
	boolean_t locked = B_FALSE;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

#if EFSYS_OPT_RX_SCALE
	spec->efs_rss_context = enp->en_rss_context;
#endif

	hash = ef10_filter_hash(spec);

	/*
	 * FIXME: Add support for inserting filters of different priorities
	 * and removing lower priority multicast filters (bug 42378)
	 */

	/*
	 * Find any existing filters with the same match tuple or
	 * else a free slot to insert at.  If any of them are busy,
	 * we have to wait and retry.
	 */
	for (;;) {
		ins_index = -1;
		depth = 1;
		EFSYS_LOCK(enp->en_eslp, state);
		locked = B_TRUE;

		for (;;) {
			i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
			saved_spec = ef10_filter_entry_spec(eftp, i);

			if (!saved_spec) {
				if (ins_index < 0) {
					ins_index = i;
				}
			} else if (ef10_filter_equal(spec, saved_spec)) {
				if (ef10_filter_entry_is_busy(eftp, i))
					break;
				if (saved_spec->efs_priority ==
				    EFX_FILTER_PRI_AUTO) {
					ins_index = i;
					goto found;
				} else if (ef10_filter_is_exclusive(spec)) {
					if (may_replace) {
						ins_index = i;
						goto found;
					} else {
						rc = EEXIST;
						goto fail1;
					}
				}

				/* Leave existing */
			}

			/*
			 * Once we reach the maximum search depth, use
			 * the first suitable slot or return EBUSY if
			 * there was none.
			 */
			if (depth == EF10_FILTER_SEARCH_LIMIT) {
				if (ins_index < 0) {
					rc = EBUSY;
					goto fail2;
				}
				goto found;
			}
			depth++;
		}
		EFSYS_UNLOCK(enp->en_eslp, state);
		locked = B_FALSE;
	}

found:
	/*
	 * Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = ef10_filter_entry_spec(eftp, ins_index);
	if (saved_spec) {
		if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {
			/* This is a filter we are refreshing */
			ef10_filter_set_entry_not_auto_old(eftp, ins_index);
			goto out_unlock;
		}
		replacing = B_TRUE;
	} else {
		EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);
		if (!saved_spec) {
			rc = ENOMEM;
			goto fail3;
		}
		*saved_spec = *spec;
		ef10_filter_set_entry(eftp, ins_index, saved_spec);
	}
	ef10_filter_set_entry_busy(eftp, ins_index);

	EFSYS_UNLOCK(enp->en_eslp, state);
	locked = B_FALSE;

	/*
	 * The filter handle may change after a successful replace operation.
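	 *
	 * For illustration, the insert and delete paths here share a
	 * "mark busy, drop the lock, perform the firmware op, re-lock"
	 * shape. A compact pthread sketch of that shape (example code only,
	 * not the driver's EFSYS_LOCK machinery):
	 *
	 *	#include <pthread.h>
	 *	#include <stdbool.h>
	 *
	 *	static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
	 *	static bool entry_busy;
	 *
	 *	static void
	 *	slow_firmware_op(void)
	 *	{
	 *		// placeholder for the MCDI round trip
	 *	}
	 *
	 *	static void
	 *	update_entry(void)
	 *	{
	 *		pthread_mutex_lock(&tbl_lock);
	 *		entry_busy = true;	// conflicting ops must back off
	 *		pthread_mutex_unlock(&tbl_lock);
	 *
	 *		slow_firmware_op();	// done without the lock held
	 *
	 *		pthread_mutex_lock(&tbl_lock);
	 *		entry_busy = false;	// entry may be touched again
	 *		pthread_mutex_unlock(&tbl_lock);
	 *	}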
*/ if (replacing) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_REPLACE, &eftp->eft_entry[ins_index].efe_handle); } else if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &eftp->eft_entry[ins_index].efe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &eftp->eft_entry[ins_index].efe_handle); } if (rc != 0) goto fail4; EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; if (replacing) { /* Update the fields that may differ */ saved_spec->efs_priority = spec->efs_priority; saved_spec->efs_flags = spec->efs_flags; saved_spec->efs_rss_context = spec->efs_rss_context; saved_spec->efs_dmaq_id = spec->efs_dmaq_id; } ef10_filter_set_entry_not_busy(eftp, ins_index); out_unlock: EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; if (filter_id) *filter_id = ins_index; return (0); fail4: EFSYS_PROBE(fail4); if (!replacing) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec); saved_spec = NULL; } ef10_filter_set_entry_not_busy(eftp, ins_index); ef10_filter_set_entry(eftp, ins_index, NULL); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } __checkReturn efx_rc_t ef10_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace) { efx_rc_t rc; rc = ef10_filter_add_internal(enp, spec, may_replace, NULL); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_delete_internal( __in efx_nic_t *enp, __in uint32_t filter_id) { efx_rc_t rc; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *spec; int state; uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS; /* * Find the software table entry and mark it busy. Don't * remove it yet; any attempt to update while we're waiting * for the firmware must find the busy entry. * * FIXME: What if the busy flag is never cleared? */ EFSYS_LOCK(enp->en_eslp, state); while (ef10_filter_entry_is_busy(table, filter_idx)) { EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_SPIN(1); EFSYS_LOCK(enp->en_eslp, state); } if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) { ef10_filter_set_entry_busy(table, filter_idx); } EFSYS_UNLOCK(enp->en_eslp, state); if (spec == NULL) { rc = ENOENT; goto fail1; } /* * Try to remove the hardware filter. This may fail if the MC has * rebooted (which frees all hardware filter resources). 
*/ if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_REMOVE, &table->eft_entry[filter_idx].efe_handle); } else { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE, &table->eft_entry[filter_idx].efe_handle); } /* Free the software table entry */ EFSYS_LOCK(enp->en_eslp, state); ef10_filter_set_entry_not_busy(table, filter_idx); ef10_filter_set_entry(table, filter_idx, NULL); EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec); /* Check result of hardware filter removal */ if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { efx_rc_t rc; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *saved_spec; unsigned int hash; unsigned int depth; unsigned int i; int state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); hash = ef10_filter_hash(spec); EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; depth = 1; for (;;) { i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1); saved_spec = ef10_filter_entry_spec(table, i); if (saved_spec && ef10_filter_equal(spec, saved_spec) && ef10_filter_same_dest(spec, saved_spec)) { break; } if (depth == EF10_FILTER_SEARCH_LIMIT) { rc = ENOENT; goto fail1; } depth++; } EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; rc = ef10_filter_delete_internal(enp, i); if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } static __checkReturn efx_rc_t efx_mcdi_get_parser_disp_info( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)]; efx_rc_t rc; uint32_t i; boolean_t support_unknown_ucast = B_FALSE; boolean_t support_unknown_mcast = B_FALSE; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } *length = MCDI_OUT_DWORD(req, GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES); if (req.emr_out_length_used < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(*length)) { rc = EMSGSIZE; goto fail2; } memcpy(list, MCDI_OUT2(req, uint32_t, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES), (*length) * sizeof (uint32_t)); EFX_STATIC_ASSERT(sizeof (uint32_t) == MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN); /* * Remove UNKNOWN UCAST and MCAST flags, and if both are present, change * the lower priority one to LOC_MAC_IG. 
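	 *
	 * Illustration of the flag rewriting below, as a standalone example
	 * with made-up flag values (not the real MCDI bit positions):
	 *
	 *	#include <stdint.h>
	 *	#include <stdbool.h>
	 *	#include <assert.h>
	 *
	 *	#define F_UNKNOWN_UCAST	0x100u	// hypothetical
	 *	#define F_UNKNOWN_MCAST	0x200u	// hypothetical
	 *
	 *	int
	 *	main(void)
	 *	{
	 *		uint32_t entry = 0x301;	// both flags plus other bits
	 *		bool ucast = false, mcast = false;
	 *
	 *		if (entry & F_UNKNOWN_UCAST) {
	 *			entry &= ~F_UNKNOWN_UCAST;	// strip flag
	 *			ucast = true;	// remember it was supported
	 *		}
	 *		if (entry & F_UNKNOWN_MCAST) {
	 *			entry &= ~F_UNKNOWN_MCAST;
	 *			mcast = true;
	 *		}
	 *		assert(entry == 0x1 && ucast && mcast);
	 *		return (0);
	 *	}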
*/ for (i = 0; i < *length; i++) { if (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN) { list[i] &= (~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN); support_unknown_ucast = B_TRUE; } if (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) { list[i] &= (~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN); support_unknown_mcast = B_TRUE; } if (support_unknown_ucast && support_unknown_mcast) { list[i] &= EFX_FILTER_MATCH_LOC_MAC_IG; break; } } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length) { efx_rc_t rc; if ((rc = efx_mcdi_get_parser_disp_info(enp, list, length)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_unicast( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *addr, __in efx_filter_flag_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the filter for the local station address */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); if (rc != 0) goto fail1; eftp->eft_unicst_filter_count++; EFSYS_ASSERT(eftp->eft_unicst_filter_count <= EFX_EF10_FILTER_UNICAST_FILTERS_MAX); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_all_unicast( __in efx_nic_t *enp, __in efx_filter_flag_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the unknown unicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_uc_def(&spec); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); if (rc != 0) goto fail1; eftp->eft_unicst_filter_count++; EFSYS_ASSERT(eftp->eft_unicst_filter_count <= EFX_EF10_FILTER_UNICAST_FILTERS_MAX); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_multicast_list( __in efx_nic_t *enp, __in boolean_t mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count, __in efx_filter_flag_t filter_flags, __in boolean_t rollback) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; uint8_t addr[6]; uint32_t i; uint32_t filter_index; uint32_t filter_count; efx_rc_t rc; if (mulcst == B_FALSE) count = 0; if (count + (brdcst ? 
1 : 0) > EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) { /* Too many MAC addresses */ rc = EINVAL; goto fail1; } /* Insert/renew multicast address list filters */ filter_count = 0; for (i = 0; i < count; i++) { efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, &addrs[i * EFX_MAC_ADDR_LEN]); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &filter_index); if (rc == 0) { eftp->eft_mulcst_filter_indexes[filter_count] = filter_index; filter_count++; } else if (rollback == B_TRUE) { /* Only stop upon failure if told to rollback */ goto rollback; } } if (brdcst == B_TRUE) { /* Insert/renew broadcast address filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); EFX_MAC_BROADCAST_ADDR_SET(addr); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &filter_index); if (rc == 0) { eftp->eft_mulcst_filter_indexes[filter_count] = filter_index; filter_count++; } else if (rollback == B_TRUE) { /* Only stop upon failure if told to rollback */ goto rollback; } } eftp->eft_mulcst_filter_count = filter_count; eftp->eft_using_all_mulcst = B_FALSE; return (0); rollback: /* Remove any filters we have inserted */ i = filter_count; while (i--) { (void) ef10_filter_delete_internal(enp, eftp->eft_mulcst_filter_indexes[i]); } eftp->eft_mulcst_filter_count = 0; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_all_multicast( __in efx_nic_t *enp, __in efx_filter_flag_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the unknown multicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_mc_def(&spec); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_mulcst_filter_indexes[0]); if (rc != 0) goto fail1; eftp->eft_mulcst_filter_count = 1; eftp->eft_using_all_mulcst = B_TRUE; /* * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic. */ return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static void ef10_filter_remove_old( __in efx_nic_t *enp) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; uint32_t i; for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { if (ef10_filter_entry_is_auto_old(table, i)) { (void) ef10_filter_delete_internal(enp, i); } } } static __checkReturn efx_rc_t ef10_filter_get_workarounds( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; uint32_t implemented = 0; uint32_t enabled = 0; efx_rc_t rc; rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled); if (rc == 0) { /* Check if chained multicast filter support is enabled */ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807) encp->enc_bug26807_workaround = B_TRUE; else encp->enc_bug26807_workaround = B_FALSE; } else if (rc == ENOTSUP) { /* * Firmware is too old to support GET_WORKAROUNDS, and support * for this workaround was implemented later. */ encp->enc_bug26807_workaround = B_FALSE; } else { goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Reconfigure all filters. * If all_unicst and/or all mulcst filters cannot be applied then * return ENOTSUP (Note the filters for the specified addresses are * still applied in this case). 
*/ __checkReturn efx_rc_t ef10_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_flag_t filter_flags; - unsigned i; + unsigned int i; efx_rc_t all_unicst_rc = 0; efx_rc_t all_mulcst_rc = 0; efx_rc_t rc; if (table->eft_default_rxq == NULL) { /* * Filters direct traffic to the default RXQ, and so cannot be * inserted until it is available. Any currently configured * filters must be removed (ignore errors in case the MC * has rebooted, which removes hardware filters). */ for (i = 0; i < table->eft_unicst_filter_count; i++) { (void) ef10_filter_delete_internal(enp, table->eft_unicst_filter_indexes[i]); } table->eft_unicst_filter_count = 0; for (i = 0; i < table->eft_mulcst_filter_count; i++) { (void) ef10_filter_delete_internal(enp, table->eft_mulcst_filter_indexes[i]); } table->eft_mulcst_filter_count = 0; return (0); } if (table->eft_using_rss) filter_flags = EFX_FILTER_FLAG_RX_RSS; else filter_flags = 0; /* Mark old filters which may need to be removed */ for (i = 0; i < table->eft_unicst_filter_count; i++) { ef10_filter_set_entry_auto_old(table, table->eft_unicst_filter_indexes[i]); } for (i = 0; i < table->eft_mulcst_filter_count; i++) { ef10_filter_set_entry_auto_old(table, table->eft_mulcst_filter_indexes[i]); } /* * Insert or renew unicast filters. * * Firmware does not perform chaining on unicast filters. As traffic is * therefore only delivered to the first matching filter, we should * always insert the specific filter for our MAC address, to try to * ensure we get that traffic. * * (If the filter for our MAC address has already been inserted by * another function, we won't receive traffic sent to us, even if we * insert a unicast mismatch filter. To prevent traffic stealing, this * therefore relies on the privilege model only allowing functions to * insert filters for their own MAC address unless explicitly given * additional privileges by the user. This also means that, even on a * privileged function, inserting a unicast mismatch filter may not * catch all traffic in multi-PCI-function scenarios.) */ table->eft_unicst_filter_count = 0; rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags); if (all_unicst || (rc != 0)) { all_unicst_rc = ef10_filter_insert_all_unicast(enp, filter_flags); if ((rc != 0) && (all_unicst_rc != 0)) goto fail1; } /* * WORKAROUND_BUG26807 controls firmware support for chained multicast * filters, and can only be enabled or disabled when the hardware filter * table is empty. * * Chained multicast filters require support from the datapath firmware, * and may not be available (e.g. low-latency variants or old Huntington * firmware). * * Firmware will reset (FLR) functions which have inserted filters in * the hardware filter table when the workaround is enabled/disabled. * Functions without any hardware filters are not reset. * * Re-check if the workaround is enabled after adding unicast hardware * filters. This ensures that encp->enc_bug26807_workaround matches the * firmware state, and that later changes to enable/disable the * workaround will result in this function seeing a reset (FLR).
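* * (Illustrative restatement, not new behaviour: the re-check happens * after this function already holds unicast hardware filters, so any * later enable/disable of the workaround by another agent must FLR this * function rather than silently invalidating the value read here.)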
* * In common-code drivers, we only support multiple PCI function * scenarios with firmware that supports multicast chaining, so we can * assume it is enabled for such cases and hence simplify the filter * insertion logic. Firmware that does not support multicast chaining * does not support multiple PCI function configurations either, so * filter insertion is much simpler and the same strategies can still be * used. */ if ((rc = ef10_filter_get_workarounds(enp)) != 0) goto fail2; if ((table->eft_using_all_mulcst != all_mulcst) && (encp->enc_bug26807_workaround == B_TRUE)) { /* * Multicast filter chaining is enabled, so traffic that matches * more than one multicast filter will be replicated and * delivered to multiple recipients. To avoid this duplicate * delivery, remove old multicast filters before inserting new * multicast filters. */ ef10_filter_remove_old(enp); } /* Insert or renew multicast filters */ if (all_mulcst == B_TRUE) { /* * Insert the all multicast filter. If that fails, try to insert * all of our multicast filters (but without rollback on * failure). */ all_mulcst_rc = ef10_filter_insert_all_multicast(enp, filter_flags); if (all_mulcst_rc != 0) { rc = ef10_filter_insert_multicast_list(enp, B_TRUE, brdcst, addrs, count, filter_flags, B_FALSE); if (rc != 0) goto fail3; } } else { /* * Insert filters for multicast addresses. * If any insertion fails, then rollback and try to insert the * all multicast filter instead. * If that also fails, try to insert all of the multicast * filters (but without rollback on failure). */ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, addrs, count, filter_flags, B_TRUE); if (rc != 0) { if ((table->eft_using_all_mulcst == B_FALSE) && (encp->enc_bug26807_workaround == B_TRUE)) { /* * Multicast filter chaining is on, so remove * old filters before inserting the multicast * all filter to avoid duplicate delivery caused * by packets matching multiple filters. 
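* * For example (illustrative): a packet that matched both a * still-present specific multicast filter and the newly inserted * all-multicast filter would be replicated and delivered twice if the * old filters were not removed first.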
*/ ef10_filter_remove_old(enp); } rc = ef10_filter_insert_all_multicast(enp, filter_flags); if (rc != 0) { rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, addrs, count, filter_flags, B_FALSE); if (rc != 0) goto fail4; } } } /* Remove old filters which were not renewed */ ef10_filter_remove_old(enp); /* report if any optional flags were rejected */ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) || ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) { rc = ENOTSUP; } return (rc); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Clear auto old flags */ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { if (ef10_filter_entry_is_auto_old(table, i)) { ef10_filter_set_entry_not_auto_old(table, i); } } return (rc); } void ef10_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; *erpp = table->eft_default_rxq; *using_rss = table->eft_using_rss; } void ef10_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; #if EFSYS_OPT_RX_SCALE EFSYS_ASSERT((using_rss == B_FALSE) || (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID)); table->eft_using_rss = using_rss; #else EFSYS_ASSERT(using_rss == B_FALSE); table->eft_using_rss = B_FALSE; #endif table->eft_default_rxq = erp; } void ef10_filter_default_rxq_clear( __in efx_nic_t *enp) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; table->eft_default_rxq = NULL; table->eft_using_rss = B_FALSE; } #endif /* EFSYS_OPT_FILTER */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: head/sys/dev/sfxge/common/ef10_nvram.c =================================================================== --- head/sys/dev/sfxge/common/ef10_nvram.c (revision 310692) +++ head/sys/dev/sfxge/common/ef10_nvram.c (revision 310693) @@ -1,2368 +1,2368 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM #include "ef10_tlv_layout.h" /* Cursor for TLV partition format */ typedef struct tlv_cursor_s { uint32_t *block; /* Base of data block */ uint32_t *current; /* Cursor position */ uint32_t *end; /* End tag position */ uint32_t *limit; /* Last dword of data block */ } tlv_cursor_t; typedef struct nvram_partition_s { uint16_t type; uint8_t chip_select; uint8_t flags; /* * The full length of the NVRAM partition. * This is different from tlv_partition_header.total_length, * which can be smaller. */ uint32_t length; uint32_t erase_size; uint32_t *data; tlv_cursor_t tlv_cursor; } nvram_partition_t; static __checkReturn efx_rc_t tlv_validate_state( __inout tlv_cursor_t *cursor); static void tlv_init_block( __out uint32_t *block) { *block = __CPU_TO_LE_32(TLV_TAG_END); } static uint32_t tlv_tag( __in tlv_cursor_t *cursor) { uint32_t dword, tag; dword = cursor->current[0]; tag = __LE_TO_CPU_32(dword); return (tag); } static size_t tlv_length( __in tlv_cursor_t *cursor) { uint32_t dword, length; if (tlv_tag(cursor) == TLV_TAG_END) return (0); dword = cursor->current[1]; length = __LE_TO_CPU_32(dword); return ((size_t)length); } static uint8_t * tlv_value( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)(&cursor->current[2])); } static uint8_t * tlv_item( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)cursor->current); } /* * TLV item DWORD length is tag + length + value (rounded up to DWORD) * equivalent to tlv_n_words_for_len in mc-comms tlv.c */ #define TLV_DWORD_COUNT(length) \ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t))) static uint32_t * tlv_next_item_ptr( __in tlv_cursor_t *cursor) { uint32_t length; length = tlv_length(cursor); return (cursor->current + TLV_DWORD_COUNT(length)); } static __checkReturn efx_rc_t tlv_advance( __inout tlv_cursor_t *cursor) { efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (cursor->current == cursor->end) { /* No more tags after END tag */ cursor->current = NULL; rc = ENOENT; goto fail2; } /* Advance to next item and validate */ cursor->current = tlv_next_item_ptr(cursor); if ((rc = tlv_validate_state(cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_rewind( __in tlv_cursor_t *cursor) { efx_rc_t rc; cursor->current = cursor->block; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_find( __inout tlv_cursor_t *cursor, __in uint32_t tag) { efx_rc_t rc; rc = tlv_rewind(cursor); while (rc == 0) { if (tlv_tag(cursor) == tag) break; rc = tlv_advance(cursor); } return (rc); } static __checkReturn efx_rc_t tlv_validate_state( __inout tlv_cursor_t *cursor) { efx_rc_t rc; /* Check cursor position */ if (cursor->current < cursor->block) { rc = EINVAL; goto fail1; } if (cursor->current > cursor->limit) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != TLV_TAG_END) { /* Check current item has space for tag and length */ if (cursor->current > 
(cursor->limit - 2)) { cursor->current = NULL; rc = EFAULT; goto fail3; } /* Check we have value data for current item and another tag */ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) { cursor->current = NULL; rc = EFAULT; goto fail4; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_init_cursor( __out tlv_cursor_t *cursor, __in uint32_t *block, __in uint32_t *limit, __in uint32_t *current) { cursor->block = block; cursor->limit = limit; cursor->current = current; cursor->end = NULL; return (tlv_validate_state(cursor)); } static __checkReturn efx_rc_t tlv_init_cursor_from_size( __out tlv_cursor_t *cursor, __in_bcount(size) uint8_t *block, __in size_t size) { uint32_t *limit; limit = (uint32_t *)(block + size - sizeof (uint32_t)); return (tlv_init_cursor(cursor, (uint32_t *)block, limit, (uint32_t *)block)); } static __checkReturn efx_rc_t tlv_init_cursor_at_offset( __out tlv_cursor_t *cursor, __in_bcount(size) uint8_t *block, __in size_t size, __in size_t offset) { uint32_t *limit; uint32_t *current; limit = (uint32_t *)(block + size - sizeof (uint32_t)); current = (uint32_t *)(block + offset); return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current)); } static __checkReturn efx_rc_t tlv_require_end( __inout tlv_cursor_t *cursor) { uint32_t *pos; efx_rc_t rc; if (cursor->end == NULL) { pos = cursor->current; if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0) goto fail1; cursor->end = cursor->current; cursor->current = pos; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static size_t tlv_block_length_used( __inout tlv_cursor_t *cursor) { efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; /* Return space used (including the END tag) */ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } static uint32_t * tlv_last_segment_end( __in tlv_cursor_t *cursor) { tlv_cursor_t segment_cursor; uint32_t *last_segment_end = cursor->block; uint32_t *segment_start = cursor->block; /* * Go through each segment and check that it has an end tag. If there * is no end tag then the previous segment was the last valid one, * so return the pointer to its end tag. 
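*
* Illustrative layout (hypothetical contents, not to scale):
*
*   | HDR ... TRAILER END | HDR ... TRAILER END | erased/garbage |
*     segment 0             segment 1             (no END tag)
*
* Here the scan stops at the garbage and returns the address of the
* END tag of segment 1.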
*/ for (;;) { if (tlv_init_cursor(&segment_cursor, segment_start, cursor->limit, segment_start) != 0) break; if (tlv_require_end(&segment_cursor) != 0) break; last_segment_end = segment_cursor.end; segment_start = segment_cursor.end + 1; } return (last_segment_end); } static uint32_t * tlv_write( __in tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t len = size; uint32_t *ptr; ptr = cursor->current; *ptr++ = __CPU_TO_LE_32(tag); *ptr++ = __CPU_TO_LE_32(len); if (len > 0) { ptr[(len - 1) / sizeof (uint32_t)] = 0; memcpy(ptr, data, len); ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr); } return (ptr); } static __checkReturn efx_rc_t tlv_insert( __inout tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; if (tag == TLV_TAG_END) { rc = EINVAL; goto fail3; } last_segment_end = tlv_last_segment_end(cursor); delta = TLV_DWORD_COUNT(size); if (last_segment_end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail4; } /* Move data up: new space at cursor->current */ memmove(cursor->current + delta, cursor->current, (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; /* Write new TLV item */ tlv_write(cursor, tag, data, size); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t tlv_delete( __inout tlv_cursor_t *cursor) { unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } delta = TLV_DWORD_COUNT(tlv_length(cursor)); if ((rc = tlv_require_end(cursor)) != 0) goto fail3; last_segment_end = tlv_last_segment_end(cursor); /* Shuffle things down, destroying the item at cursor->current */ memmove(cursor->current, cursor->current + delta, (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t tlv_modify( __inout tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t *pos; unsigned int old_ndwords; unsigned int new_ndwords; unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != tag) { rc = EINVAL; goto fail3; } old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor)); new_ndwords = TLV_DWORD_COUNT(size); if ((rc = tlv_require_end(cursor)) != 0) goto fail4; last_segment_end = tlv_last_segment_end(cursor); if (new_ndwords > old_ndwords) { /* Expand space used for TLV item */ delta = new_ndwords - old_ndwords; pos = cursor->current + old_ndwords; if (last_segment_end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail5; } /* Move up: new space at (cursor->current + old_ndwords) */ memmove(pos + delta, pos, (last_segment_end + 1 - pos) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; } else 
if (new_ndwords < old_ndwords) { /* Shrink space used for TLV item */ delta = old_ndwords - new_ndwords; pos = cursor->current + new_ndwords; /* Move down: remove words at (cursor->current + new_ndwords) */ memmove(pos, pos + delta, (last_segment_end + 1 - pos) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; } /* Write new data */ tlv_write(cursor, tag, data, size); return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint32_t checksum_tlv_partition( __in nvram_partition_t *partition) { tlv_cursor_t *cursor; uint32_t *ptr; uint32_t *end; uint32_t csum; size_t len; cursor = &partition->tlv_cursor; len = tlv_block_length_used(cursor); EFSYS_ASSERT3U((len & 3), ==, 0); csum = 0; ptr = partition->data; end = &ptr[len >> 2]; while (ptr < end) csum += __LE_TO_CPU_32(*ptr++); return (csum); } static __checkReturn efx_rc_t tlv_update_partition_len_and_cks( __in tlv_cursor_t *cursor) { efx_rc_t rc; nvram_partition_t partition; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t new_len; /* * We just modified the partition, so the total length may not be * valid. Don't use tlv_find(), which performs some sanity checks * that may fail here. */ partition.data = cursor->block; memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor)); header = (struct tlv_partition_header *)partition.data; /* Sanity check. */ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) { rc = EFAULT; goto fail1; } new_len = tlv_block_length_used(&partition.tlv_cursor); if (new_len == 0) { rc = EFAULT; goto fail2; } header->total_length = __CPU_TO_LE_32(new_len); /* Ensure the modified partition always has a new generation count. 
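* * The trailer checksum is then rebalanced: the invariant (see * checksum_tlv_partition() above) is that all dwords in the used part * of the partition sum to zero mod 2^32, so if they currently sum to S, * replacing the trailer checksum c with (c - S) restores a zero total.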
*/ header->generation = __CPU_TO_LE_32( __LE_TO_CPU_32(header->generation) + 1); trailer = (struct tlv_partition_trailer *)((uint8_t *)header + new_len - sizeof (*trailer) - sizeof (uint32_t)); trailer->generation = header->generation; trailer->checksum = __CPU_TO_LE_32( __LE_TO_CPU_32(trailer->checksum) - checksum_tlv_partition(&partition)); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Validate buffer contents (before writing to flash) */ __checkReturn efx_rc_t ef10_nvram_buffer_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; efx_rc_t rc; EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); if ((partn_data == NULL) || (partn_size == 0)) { rc = EINVAL; goto fail1; } /* The partition header must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data, partn_size)) != 0) { rc = EFAULT; goto fail2; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail3; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV partition length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > partn_size) { rc = EFBIG; goto fail4; } /* Check partition ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail5; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail6; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail7; } /* Check generation counts are consistent */ if (trailer->generation != header->generation) { rc = EINVAL; goto fail8; } /* Verify partition checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(partn_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail9; } return (0); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_create( __in efx_nic_t *enp, __in uint16_t partn_type, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { uint32_t *buf = (uint32_t *)partn_data; efx_rc_t rc; tlv_cursor_t cursor; struct tlv_partition_header header; struct tlv_partition_trailer trailer; - unsigned min_buf_size = sizeof (struct tlv_partition_header) + + unsigned int min_buf_size = sizeof (struct tlv_partition_header) + sizeof (struct tlv_partition_trailer); if (partn_size < min_buf_size) { rc = EINVAL; goto fail1; } memset(buf, 0xff, partn_size); tlv_init_block(buf); if ((rc = tlv_init_cursor(&cursor, buf, (uint32_t *)((uint8_t *)buf + partn_size), buf)) != 0) { goto fail2; } header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER); header.length = __CPU_TO_LE_32(sizeof (header) - 8); header.type_id = __CPU_TO_LE_16(partn_type); header.preset = 0; header.generation = __CPU_TO_LE_32(1); header.total_length = 0; /* This will be fixed below. 
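The tlv_update_partition_len_and_cks() call at the end of this * function fills it in.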
*/ if ((rc = tlv_insert( &cursor, TLV_TAG_PARTITION_HEADER, (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0) goto fail3; if ((rc = tlv_advance(&cursor)) != 0) goto fail4; trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER); trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8); trailer.generation = header.generation; trailer.checksum = 0; /* This will be fixed below. */ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER, (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0) goto fail5; if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) goto fail6; /* Check that the partition is valid. */ if ((rc = ef10_nvram_buffer_validate(enp, partn_type, partn_data, partn_size)) != 0) goto fail7; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint32_t byte_offset( __in uint32_t *position, __in uint32_t *base) { return (uint32_t)((uint8_t *)position - (uint8_t *)base); } __checkReturn efx_rc_t ef10_nvram_buffer_find_item_start( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp) { /* Read past partition header to find start address of the first key */ tlv_cursor_t cursor; efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail3; } *startp = byte_offset(cursor.current, cursor.block); if ((rc = tlv_require_end(&cursor)) != 0) goto fail4; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_find_end( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp) { /* Read to end of partition */ tlv_cursor_t cursor; efx_rc_t rc; uint32_t *segment_used; _NOTE(ARGUNUSED(offset)) if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } segment_used = cursor.block; /* * Go through each segment and check that it has an end tag. If there * is no end tag then the previous segment was the last valid one, * so return the used space including that end tag. */ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { if (tlv_require_end(&cursor) != 0) { if (segment_used == cursor.block) { /* * First segment is corrupt, so there is * no valid data in partition. 
*/ rc = EINVAL; goto fail2; } break; } segment_used = cursor.end + 1; cursor.current = segment_used; } /* Return space used (including the END tag) */ *endp = (segment_used - cursor.block) * sizeof (uint32_t); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn __success(return != B_FALSE) boolean_t ef10_nvram_buffer_find_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp) { /* Find TLV at offset and return key start and length */ tlv_cursor_t cursor; uint8_t *key; uint32_t tag; if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset) != 0) { return (B_FALSE); } while ((key = tlv_item(&cursor)) != NULL) { tag = tlv_tag(&cursor); if (tag == TLV_TAG_PARTITION_HEADER || tag == TLV_TAG_PARTITION_TRAILER) { if (tlv_advance(&cursor) != 0) { break; } continue; } *startp = byte_offset(cursor.current, cursor.block); *lengthp = byte_offset(tlv_next_item_ptr(&cursor), cursor.current); return (B_TRUE); } return (B_FALSE); } __checkReturn efx_rc_t ef10_nvram_buffer_get_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(item_max_size, *lengthp) caddr_t itemp, __in size_t item_max_size, __out uint32_t *lengthp) { efx_rc_t rc; tlv_cursor_t cursor; uint32_t item_length; if (item_max_size < length) { rc = ENOSPC; goto fail1; } if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail2; } item_length = tlv_length(&cursor); if (length < item_length) { rc = ENOSPC; goto fail3; } memcpy(itemp, tlv_value(&cursor), item_length); *lengthp = item_length; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_insert_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp) { efx_rc_t rc; tlv_cursor_t cursor; if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail1; } rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length); if (rc != 0) { goto fail2; } *lengthp = byte_offset(tlv_next_item_ptr(&cursor), cursor.current); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_delete_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end) { efx_rc_t rc; tlv_cursor_t cursor; _NOTE(ARGUNUSED(length, end)) if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail1; } if ((rc = tlv_delete(&cursor)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_finish( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size) { efx_rc_t rc; tlv_cursor_t cursor; if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } if ((rc = tlv_require_end(&cursor)) != 0) goto fail2; if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read and validate a 
segment from a partition. A segment is a complete * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may * be multiple segments in a partition, so seg_offset allows segments * beyond the first to be read. */ static __checkReturn efx_rc_t ef10_nvram_read_tlv_segment( __in efx_nic_t *enp, __in uint32_t partn, __in size_t seg_offset, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; efx_rc_t rc; EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Read initial chunk of the segment, starting at offset */ if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data, EF10_NVRAM_CHUNK, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) { goto fail2; } /* A PARTITION_HEADER tag must be the first item at the given offset */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail3; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail4; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV segment length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > max_seg_size) { rc = EFBIG; goto fail5; } /* Read the remaining segment content */ if (total_length > EF10_NVRAM_CHUNK) { if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset + EF10_NVRAM_CHUNK, seg_data + EF10_NVRAM_CHUNK, total_length - EF10_NVRAM_CHUNK, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) goto fail6; } /* Check segment ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail7; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail8; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail9; } /* Check data read from segment is consistent */ if (trailer->generation != header->generation) { /* * The partition data may have been modified between successive * MCDI NVRAM_READ requests by the MC or another PCI function. * * The caller must retry to obtain consistent partition data. */ rc = EAGAIN; goto fail10; } /* Verify segment checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail11; } return (0); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read a single TLV item from a host memory * buffer containing a TLV formatted segment. 
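* * On success *datap is NULL for a zero-length item, otherwise a buffer * obtained with EFSYS_KMEM_ALLOC; the caller is expected to release it * with EFSYS_KMEM_FREE using the returned *sizep (an assumption * consistent with how ef10_nvram_partn_read_tlv() passes the buffer * upward).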
*/ __checkReturn efx_rc_t ef10_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep) { tlv_cursor_t cursor; caddr_t data; size_t length; caddr_t value; efx_rc_t rc; if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Find requested TLV tag in segment data */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail2; } if ((rc = tlv_find(&cursor, tag)) != 0) { rc = ENOENT; goto fail3; } value = (caddr_t)tlv_value(&cursor); length = tlv_length(&cursor); if (length == 0) data = NULL; else { /* Copy out data from TLV item */ EFSYS_KMEM_ALLOC(enp->en_esip, length, data); if (data == NULL) { rc = ENOMEM; goto fail4; } memcpy(data, value, length); } *datap = data; *sizep = length; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Read a single TLV item from the first segment in a TLV formatted partition */ __checkReturn efx_rc_t ef10_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap, __out size_t *seg_sizep) { caddr_t seg_data = NULL; size_t partn_size = 0; size_t length; caddr_t data; int retry; efx_rc_t rc; /* Allocate sufficient memory for the entire partition */ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; if (partn_size == 0) { rc = ENOENT; goto fail2; } EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data); if (seg_data == NULL) { rc = ENOMEM; goto fail3; } /* * Read the first segment in a TLV partition. Retry until consistent * segment contents are returned. Inconsistent data may be read if: * a) the segment contents are invalid * b) the MC has rebooted while we were reading the partition * c) the partition has been modified while we were reading it * Limit retry attempts to ensure forward progress. */ retry = 10; do { rc = ef10_nvram_read_tlv_segment(enp, partn, 0, seg_data, partn_size); } while ((rc == EAGAIN) && (--retry > 0)); if (rc != 0) { /* Failed to obtain consistent segment data */ goto fail4; } if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size, tag, &data, &length)) != 0) goto fail5; EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); *seg_datap = data; *seg_sizep = length; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Compute the size of a segment. 
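As a side effect this * validates the segment: PARTITION_HEADER must come first, the chain * must end with PARTITION_TRAILER and END tags, all dwords must sum to * zero, the used length must equal the header's total_length, and no * duplicate PARTITION_HEADER may appear before the END tag.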
*/ static __checkReturn efx_rc_t ef10_nvram_buf_segment_size( __in caddr_t seg_data, __in size_t max_seg_size, __out size_t *seg_sizep) { efx_rc_t rc; tlv_cursor_t cursor; struct tlv_partition_header *header; uint32_t cksum; int pos; uint32_t *end_tag_position; uint32_t segment_length; /* A PARTITION_HEADER tag must be the first item at the given offset */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV segment length (includes the END tag) */ *seg_sizep = __LE_TO_CPU_32(header->total_length); if (*seg_sizep > max_seg_size) { rc = EFBIG; goto fail3; } /* Check segment ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail4; } if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail5; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail6; } end_tag_position = cursor.current; /* Verify segment checksum */ cksum = 0; for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail7; } /* * Calculate total length from HEADER to END tags and compare to * max_seg_size and the total_length field in the HEADER tag. */ segment_length = tlv_block_length_used(&cursor); if (segment_length > max_seg_size) { rc = EINVAL; goto fail8; } if (segment_length != *seg_sizep) { rc = EINVAL; goto fail9; } /* Skip over the first HEADER tag. */ rc = tlv_rewind(&cursor); rc = tlv_advance(&cursor); while (rc == 0) { if (tlv_tag(&cursor) == TLV_TAG_END) { /* Check that the END tag is the one found earlier. */ if (cursor.current != end_tag_position) { rc = EINVAL; goto fail10; } break; } /* Check for duplicate HEADER tags before the END tag. */ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail11; } rc = tlv_advance(&cursor); } if (rc != 0) goto fail12; return (0); fail12: EFSYS_PROBE(fail12); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in a host memory buffer containing a TLV * formatted segment. Historically partitions consisted of only one segment. 
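* * The trailer checksum is recomputed from scratch below: it is zeroed, * the dwords of the segment are summed, and the two's complement of the * sum (~cksum + 1 == -cksum) is stored so that the finished segment * again sums to zero.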
*/ __checkReturn efx_rc_t ef10_nvram_buf_write_tlv( __inout_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; uint32_t generation; uint32_t cksum; int pos; efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Update the TLV chain to contain the new data */ if ((rc = tlv_find(&cursor, tag)) == 0) { /* Modify existing TLV item */ if ((rc = tlv_modify(&cursor, tag, (uint8_t *)tag_data, tag_size)) != 0) goto fail3; } else { /* Insert a new TLV item before the PARTITION_TRAILER */ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER); if (rc != 0) { rc = EINVAL; goto fail4; } if ((rc = tlv_insert(&cursor, tag, (uint8_t *)tag_data, tag_size)) != 0) { rc = EINVAL; goto fail5; } } /* Find the trailer tag */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail6; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); /* Update PARTITION_HEADER and PARTITION_TRAILER fields */ *total_lengthp = tlv_block_length_used(&cursor); if (*total_lengthp > max_seg_size) { rc = ENOSPC; goto fail7; } generation = __LE_TO_CPU_32(header->generation) + 1; header->total_length = __CPU_TO_LE_32(*total_lengthp); header->generation = __CPU_TO_LE_32(generation); trailer->generation = __CPU_TO_LE_32(generation); /* Recompute PARTITION_TRAILER checksum */ trailer->checksum = 0; cksum = 0; for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } trailer->checksum = ~cksum + 1; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in the first segment of a TLV formatted * dynamic config partition. The first segment is the current active * configuration. */ __checkReturn efx_rc_t ef10_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size) { return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data, size, B_FALSE); } /* * Read a segment from nvram at the given offset into a buffer (segment_data) * and optionally write a new tag to it. */ static __checkReturn efx_rc_t ef10_nvram_segment_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __inout caddr_t *seg_datap, __inout size_t *partn_offsetp, __inout size_t *src_remain_lenp, __inout size_t *dest_remain_lenp, __in boolean_t write) { efx_rc_t rc; efx_rc_t status; size_t original_segment_size; size_t modified_segment_size; /* * Read the segment from NVRAM into the segment_data buffer and validate * it, returning if it does not validate. This is not a failure unless * this is the first segment in a partition. In this case the caller * must propagate the error. 
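* * On success, *seg_datap and *partn_offsetp are advanced past this * segment and the remaining source/destination lengths are reduced, so * the caller can walk successive segments with the same state.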
*/ status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp, *seg_datap, *src_remain_lenp); if (status != 0) { rc = EINVAL; goto fail1; } status = ef10_nvram_buf_segment_size(*seg_datap, *src_remain_lenp, &original_segment_size); if (status != 0) { rc = EINVAL; goto fail2; } if (write) { /* Update the contents of the segment in the buffer */ if ((rc = ef10_nvram_buf_write_tlv(*seg_datap, *dest_remain_lenp, tag, data, size, &modified_segment_size)) != 0) { goto fail3; } *dest_remain_lenp -= modified_segment_size; *seg_datap += modified_segment_size; } else { /* * We won't modify this segment, but still need to update the * remaining lengths and pointers. */ *dest_remain_lenp -= original_segment_size; *seg_datap += original_segment_size; } *partn_offsetp += original_segment_size; *src_remain_lenp -= original_segment_size; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in either the first segment or in all * segments in a TLV formatted dynamic config partition. Dynamic config * partitions on boards that support RFID are divided into a number of segments, * each formatted like a partition, with header, trailer and end tags. The first * segment is the current active configuration. * * The segments are initialised by manftest and each contain a different * configuration e.g. firmware variant. The firmware can be instructed * via RFID to copy a segment to replace the first segment, hence changing the * active configuration. This allows ops to change the configuration of a board * prior to shipment using RFID. * * Changes to the dynamic config may need to be written to all segments (e.g. * firmware versions) or just the first segment (changes to the active * configuration). See SF-111324-SW "The use of RFID in Solarflare Products". * If only the first segment is written the code still needs to be aware of the * possible presence of subsequent segments as writing to a segment may cause * its size to increase, which would overwrite the subsequent segments and * invalidate them. */ __checkReturn efx_rc_t ef10_nvram_partn_write_segment_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t all_segments) { size_t partn_size = 0; caddr_t partn_data; size_t total_length = 0; efx_rc_t rc; size_t current_offset = 0; size_t remaining_original_length; size_t remaining_modified_length; caddr_t segment_data; EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG); /* Allocate sufficient memory for the entire partition */ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data); if (partn_data == NULL) { rc = ENOMEM; goto fail2; } remaining_original_length = partn_size; remaining_modified_length = partn_size; segment_data = partn_data; /* Lock the partition */ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0) goto fail3; /* Iterate over each (potential) segment to update it. */ do { boolean_t write = all_segments || current_offset == 0; rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size, &segment_data, ¤t_offset, &remaining_original_length, &remaining_modified_length, write); if (rc != 0) { if (current_offset == 0) { /* * If no data has been read then the first * segment is invalid, which is an error. 
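* Here current_offset == 0 means even the first segment failed to * validate, so the whole partition is treated as unusable.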
*/ goto fail4; } break; } } while (current_offset < partn_size); total_length = segment_data - partn_data; /* * We've run out of space. This should actually be dealt with by * ef10_nvram_buf_write_tlv returning ENOSPC. */ if (total_length > partn_size) { rc = ENOSPC; goto fail5; } /* Erase the whole partition in NVRAM */ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0) goto fail6; /* Write new partition contents from the buffer to NVRAM */ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data, total_length)) != 0) goto fail7; /* Unlock the partition */ ef10_nvram_partn_unlock(enp, partn); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); ef10_nvram_partn_unlock(enp, partn); fail3: EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Get the size of a NVRAM partition. This is the total size allocated in nvram, * not the data used by the segments in the partition. */ __checkReturn efx_rc_t ef10_nvram_partn_size( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *sizep) { efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, sizep, NULL, NULL, NULL)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_lock( __in efx_nic_t *enp, __in uint32_t partn) { efx_rc_t rc; if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_read_mode( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size, __in uint32_t mode) { size_t chunk; efx_rc_t rc; while (size > 0) { chunk = MIN(size, EF10_NVRAM_CHUNK); if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk, mode)) != 0) { goto fail1; } size -= chunk; data += chunk; offset += chunk; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_read( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { /* * Read requests which come in through the EFX API expect to * read the current, active partition. 
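Hence the * MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT read mode below; the TLV * segment reader above uses the same mode.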
*/ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT); } __checkReturn efx_rc_t ef10_nvram_partn_erase( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __in size_t size) { efx_rc_t rc; uint32_t erase_size; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, &erase_size, NULL)) != 0) goto fail1; if (erase_size == 0) { if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) goto fail2; } else { if (size % erase_size != 0) { rc = EINVAL; goto fail3; } while (size > 0) { if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, erase_size)) != 0) goto fail4; offset += erase_size; size -= erase_size; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_write( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { size_t chunk; uint32_t write_size; efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, NULL, &write_size)) != 0) goto fail1; if (write_size != 0) { /* * Check that the size is a multiple of the write chunk size if * the write chunk size is available. */ if (size % write_size != 0) { rc = EINVAL; goto fail2; } } else { write_size = EF10_NVRAM_CHUNK; } while (size > 0) { chunk = MIN(size, write_size); if ((rc = efx_mcdi_nvram_write(enp, partn, offset, data, chunk)) != 0) { goto fail3; } size -= chunk; data += chunk; offset += chunk; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_nvram_partn_unlock( __in efx_nic_t *enp, __in uint32_t partn) { boolean_t reboot; efx_rc_t rc; reboot = B_FALSE; if ((rc = efx_mcdi_nvram_update_finish(enp, partn, reboot)) != 0) goto fail1; return; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); } __checkReturn efx_rc_t ef10_nvram_partn_set_version( __in efx_nic_t *enp, __in uint32_t partn, __in_ecount(4) uint16_t version[4]) { struct tlv_partition_version partn_version; size_t size; efx_rc_t rc; /* Add or modify partition version TLV item */ partn_version.version_w = __CPU_TO_LE_16(version[0]); partn_version.version_x = __CPU_TO_LE_16(version[1]); partn_version.version_y = __CPU_TO_LE_16(version[2]); partn_version.version_z = __CPU_TO_LE_16(version[3]); size = sizeof (partn_version) - (2 * sizeof (uint32_t)); /* Write the version number to all segments in the partition */ if ((rc = ef10_nvram_partn_write_segment_tlv(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, TLV_TAG_PARTITION_VERSION(partn), (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM typedef struct ef10_parttbl_entry_s { unsigned int partn; unsigned int port; efx_nvram_type_t nvtype; } ef10_parttbl_entry_t; /* Translate EFX NVRAM types to firmware partition types */ static ef10_parttbl_entry_t hunt_parttbl[] = { {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN}, 
{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE} }; static ef10_parttbl_entry_t medford_parttbl[] = { {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE} }; static __checkReturn efx_rc_t 
ef10_parttbl_get( __in efx_nic_t *enp, __out ef10_parttbl_entry_t **parttblp, __out size_t *parttbl_rowsp) { switch (enp->en_family) { case EFX_FAMILY_HUNTINGTON: *parttblp = hunt_parttbl; *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl); break; case EFX_FAMILY_MEDFORD: *parttblp = medford_parttbl; *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl); break; default: EFSYS_ASSERT(B_FALSE); return (EINVAL); } return (0); } __checkReturn efx_rc_t ef10_nvram_type_to_partn( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *partnp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); ef10_parttbl_entry_t *parttbl = NULL; size_t parttbl_rows = 0; unsigned int i; EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT(partnp != NULL); if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { for (i = 0; i < parttbl_rows; i++) { ef10_parttbl_entry_t *entry = &parttbl[i]; if (entry->nvtype == type && entry->port == emip->emi_port) { *partnp = entry->partn; return (0); } } } return (ENOTSUP); } #if EFSYS_OPT_DIAG static __checkReturn efx_rc_t ef10_nvram_partn_to_type( __in efx_nic_t *enp, __in uint32_t partn, __out efx_nvram_type_t *typep) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); ef10_parttbl_entry_t *parttbl = NULL; size_t parttbl_rows = 0; unsigned int i; EFSYS_ASSERT(typep != NULL); if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { for (i = 0; i < parttbl_rows; i++) { ef10_parttbl_entry_t *entry = &parttbl[i]; if (entry->partn == partn && entry->port == emip->emi_port) { *typep = entry->nvtype; return (0); } } } return (ENOTSUP); } __checkReturn efx_rc_t ef10_nvram_test( __in efx_nic_t *enp) { efx_nvram_type_t type; unsigned int npartns = 0; uint32_t *partns = NULL; size_t size; unsigned int i; efx_rc_t rc; /* Read available partitions from NVRAM partition map */ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t); EFSYS_KMEM_ALLOC(enp->en_esip, size, partns); if (partns == NULL) { rc = ENOMEM; goto fail1; } if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size, &npartns)) != 0) { goto fail2; } for (i = 0; i < npartns; i++) { /* Check if the partition is supported for this port */ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0) continue; if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0) goto fail3; } EFSYS_KMEM_FREE(enp->en_esip, size, partns); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, size, partns); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ __checkReturn efx_rc_t ef10_nvram_partn_get_version( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]) { efx_rc_t rc; /* FIXME: get highest partn version from all ports */ /* FIXME: return partn description if available */ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep, version, NULL, 0)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_rw_start( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *chunk_sizep) { efx_rc_t rc; if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0) goto fail1; if (chunk_sizep != NULL) *chunk_sizep = EF10_NVRAM_CHUNK; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_nvram_partn_rw_finish( __in efx_nic_t *enp, __in uint32_t partn) { ef10_nvram_partn_unlock(enp, partn); } #endif /* EFSYS_OPT_NVRAM */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: head/sys/dev/sfxge/common/efx_ev.c 
=================================================================== --- head/sys/dev/sfxge/common/efx_ev.c (revision 310692) +++ head/sys/dev/sfxge/common/efx_ev.c (revision 310693) @@ -1,1458 +1,1458 @@ /*- * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_MCDI #include "mcdi_mon.h" #endif #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ do { \ (_eep)->ee_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_EV_QSTAT_INCR(_eep, _stat) #endif #define EFX_EV_PRESENT(_qword) \ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff) #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_ev_init( __in efx_nic_t *enp); static void siena_ev_fini( __in efx_nic_t *enp); static __checkReturn efx_rc_t siena_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint32_t us, __in efx_evq_t *eep); static void siena_ev_qdestroy( __in efx_evq_t *eep); static __checkReturn efx_rc_t siena_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); static void siena_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static void siena_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); static __checkReturn efx_rc_t siena_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); #if EFSYS_OPT_QSTATS static void siena_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static const efx_ev_ops_t __efx_ev_siena_ops = { siena_ev_init, /* eevo_init */ siena_ev_fini, /* eevo_fini */ siena_ev_qcreate, /* eevo_qcreate */ siena_ev_qdestroy, /* eevo_qdestroy */ siena_ev_qprime, /* eevo_qprime */ siena_ev_qpost, /* eevo_qpost */ siena_ev_qmoderate, /* eevo_qmoderate */ #if EFSYS_OPT_QSTATS siena_ev_qstats_update, /* eevo_qstats_update */ #endif }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static const efx_ev_ops_t __efx_ev_ef10_ops = { ef10_ev_init, /* eevo_init */ ef10_ev_fini, /* eevo_fini */ ef10_ev_qcreate, /* eevo_qcreate */ ef10_ev_qdestroy, /* eevo_qdestroy */ ef10_ev_qprime, /* eevo_qprime */ ef10_ev_qpost, /* eevo_qpost */ ef10_ev_qmoderate, /* eevo_qmoderate */ #if EFSYS_OPT_QSTATS ef10_ev_qstats_update, /* eevo_qstats_update */ #endif }; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_ev_init( __in efx_nic_t *enp) { const efx_ev_ops_t *eevop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); if (enp->en_mod_flags & EFX_MOD_EV) { rc = EINVAL; goto fail1; } switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: eevop = &__efx_ev_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: eevop = &__efx_ev_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: eevop = &__efx_ev_ef10_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); if ((rc = eevop->eevo_init(enp)) != 0) goto fail2; enp->en_eevop = eevop; enp->en_mod_flags |= EFX_MOD_EV; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_eevop = NULL; enp->en_mod_flags &= ~EFX_MOD_EV; return (rc); } void efx_ev_fini( __in efx_nic_t *enp) { const efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); EFSYS_ASSERT(!(enp->en_mod_flags 
& EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); eevop->eevo_fini(enp); enp->en_eevop = NULL; enp->en_mod_flags &= ~EFX_MOD_EV; } __checkReturn efx_rc_t efx_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint32_t us, __deref_out efx_evq_t **eepp) { const efx_ev_ops_t *eevop = enp->en_eevop; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_evq_t *eep; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit); /* Allocate an EVQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep); if (eep == NULL) { rc = ENOMEM; goto fail1; } eep->ee_magic = EFX_EVQ_MAGIC; eep->ee_enp = enp; eep->ee_index = index; eep->ee_mask = n - 1; eep->ee_esmp = esmp; /* * Set outputs before the queue is created because interrupts may be * raised for events immediately after the queue is created, before the * function call below returns. See bug58606. * * The eepp pointer passed in by the client must therefore point to data * shared with the client's event processing context. */ enp->en_ev_qcount++; *eepp = eep; if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, us, eep)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); *eepp = NULL; enp->en_ev_qcount--; EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(enp->en_ev_qcount != 0); --enp->en_ev_qcount; eevop->eevo_qdestroy(eep); /* Free the EVQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); } __checkReturn efx_rc_t efx_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; efx_rc_t rc; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); if (!(enp->en_mod_flags & EFX_MOD_INTR)) { rc = EINVAL; goto fail1; } if ((rc = eevop->eevo_qprime(eep, count)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn boolean_t efx_ev_qpending( __in efx_evq_t *eep, __in unsigned int count) { size_t offset; efx_qword_t qword; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword); return (EFX_EV_PRESENT(qword)); } #if EFSYS_OPT_EV_PREFETCH void efx_ev_qprefetch( __in efx_evq_t *eep, __in unsigned int count) { unsigned int offset; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); } #endif /* EFSYS_OPT_EV_PREFETCH */ void efx_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); /* * FIXME: Huntington will require support for hardware event batching * and merging, which will need a different ev_qpoll implementation. * * Without those features the Falcon/Siena code can be used unchanged. 
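 *
 * For illustration, a minimal sketch of the loop a client is expected
 * to drive around these entry points (only efx_ev_qpending(),
 * efx_ev_qpoll() and efx_ev_qprime() are the API defined in this file;
 * the wrapper name and its arguments are hypothetical):
 */

#if 0	/* illustrative sketch, not functional driver code */
static void
hypothetical_evq_service(efx_evq_t *eep, unsigned int *countp,
	const efx_ev_callbacks_t *eecp, void *arg)
{
	/* Process any events the hardware has already delivered. */
	if (efx_ev_qpending(eep, *countp))
		efx_ev_qpoll(eep, countp, eecp, arg);

	/* Re-arm the queue so the next event raises an interrupt. */
	if (efx_ev_qprime(eep, *countp) != 0) {
		/* The queue is left unarmed; the caller must recover. */
	}
}
#endif

/*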
*/ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN); EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV == FSE_AZ_EV_CODE_DRV_GEN_EV); #if EFSYS_OPT_MCDI EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV == FSE_AZ_EV_CODE_MCDI_EVRESPONSE); #endif siena_ev_qpoll(eep, countp, eecp, arg); } void efx_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(eevop != NULL && eevop->eevo_qpost != NULL); eevop->eevo_qpost(eep, data); } __checkReturn efx_rc_t efx_ev_usecs_to_ticks( __in efx_nic_t *enp, __in unsigned int us, __out unsigned int *ticksp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); unsigned int ticks; /* Convert microseconds to a timer tick count */ if (us == 0) ticks = 0; else if (us * 1000 < encp->enc_evq_timer_quantum_ns) ticks = 1; /* Never round down to zero */ else ticks = us * 1000 / encp->enc_evq_timer_quantum_ns; *ticksp = ticks; return (0); } __checkReturn efx_rc_t efx_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; efx_rc_t rc; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); if ((rc = eevop->eevo_qmoderate(eep, us)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_QSTATS void efx_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); eevop->eevo_qstats_update(eep, stat); } #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_ev_init( __in efx_nic_t *enp) { efx_oword_t oword; /* * Program the event queue for receive and transmit queue * flush events. */ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0); EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword); return (0); } static __checkReturn boolean_t siena_ev_rx_not_ok( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in uint32_t label, __in uint32_t id, __inout uint16_t *flagsp) { boolean_t ignore = B_FALSE; if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC); EFSYS_PROBE(tobe_disc); /* * Assume this is a unicast address mismatch, unless below * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or * EV_RX_PAUSE_FRM_ERR is set. */ (*flagsp) |= EFX_ADDR_MISMATCH; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) { EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id); EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); (*flagsp) |= EFX_DISCARD; #if EFSYS_OPT_RX_SCATTER /* * Lookout for payload queue ran dry errors and ignore them. * * Sadly for the header/data split cases, the descriptor * pointer in this event refers to the header queue and * therefore cannot be easily detected as duplicate. * So we drop these and rely on the receive processing seeing * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard * the partially received packet. 
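 *
 * As an aside on timing, efx_ev_usecs_to_ticks() above rounds the
 * requested moderation period down to whole timer ticks, except that
 * any non-zero request yields at least one tick. A host-side
 * illustration (the 5000ns quantum is an arbitrary example value, not
 * a claim about real hardware):
 */

#if 0	/* illustrative sketch, mirrors the conversion above */
#include <assert.h>

static unsigned int
ticks_from_us(unsigned int us, unsigned int quantum_ns)
{
	if (us == 0)
		return (0);
	if (us * 1000 < quantum_ns)
		return (1);			/* never round down to zero */
	return (us * 1000 / quantum_ns);
}

static void
ticks_example(void)
{
	assert(ticks_from_us(0, 5000) == 0);	/* timer disabled */
	assert(ticks_from_us(3, 5000) == 1);	/* 3000ns < quantum */
	assert(ticks_from_us(123, 5000) == 24);	/* 123000 / 5000 */
}
#endif

/*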
*/ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) && (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) && (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0)) ignore = B_TRUE; #endif /* EFSYS_OPT_RX_SCATTER */ } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); EFSYS_PROBE(crc_err); (*flagsp) &= ~EFX_ADDR_MISMATCH; (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR); EFSYS_PROBE(pause_frm_err); (*flagsp) &= ~EFX_ADDR_MISMATCH; (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR); EFSYS_PROBE(owner_id_err); (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); EFSYS_PROBE(ipv4_err); (*flagsp) &= ~EFX_CKSUM_IPV4; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); EFSYS_PROBE(udp_chk_err); (*flagsp) &= ~EFX_CKSUM_TCPUDP; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR); /* * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error * condition. */ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP); } return (ignore); } static __checkReturn boolean_t siena_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t id; uint32_t size; uint32_t label; boolean_t ok; #if EFSYS_OPT_RX_SCATTER boolean_t sop; boolean_t jumbo_cont; #endif /* EFSYS_OPT_RX_SCATTER */ uint32_t hdr_type; boolean_t is_v6; uint16_t flags; boolean_t ignore; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_RX); /* Basic packet information */ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR); size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT); label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL); ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0); #if EFSYS_OPT_RX_SCATTER sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0); jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0); #endif /* EFSYS_OPT_RX_SCATTER */ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE); is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0); /* * If packet is marked as OK and packet type is TCP/IP or * UDP/IP or other IP, then we can rely on the hardware checksums. 
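 *
 * Conversely, when FSF_AZ_RX_EV_PKT_OK is clear, siena_ev_rx_not_ok()
 * above only ever weakens those flags. A sketch of that flag
 * arithmetic, using the flag names from this file:
 */

#if 0	/* illustrative sketch of the downgrade sequence */
static void
rx_not_ok_flags_example(void)
{
	uint16_t flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;

	/* FSF_AZ_RX_EV_TOBE_DISC: assume a unicast address mismatch */
	flags |= EFX_ADDR_MISMATCH;

	/* FSF_AZ_RX_EV_ETH_CRC_ERR withdraws that guess and discards */
	flags &= ~EFX_ADDR_MISMATCH;
	flags |= EFX_DISCARD;

	/* FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR: stop trusting the checksum */
	flags &= ~EFX_CKSUM_TCPUDP;
}
#endif

/*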
*/ switch (hdr_type) { case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP; if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP; if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); flags = EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_OTHER: EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); flags = 0; break; default: EFSYS_ASSERT(B_FALSE); flags = 0; break; } #if EFSYS_OPT_RX_SCATTER /* Report scatter and header/lookahead split buffer flags */ if (sop) flags |= EFX_PKT_START; if (jumbo_cont) flags |= EFX_PKT_CONT; #endif /* EFSYS_OPT_RX_SCATTER */ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */ if (!ok) { ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags); if (ignore) { EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, uint32_t, size, uint16_t, flags); return (B_FALSE); } } /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); /* Detect multicast packets that didn't match the filter */ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH); } else { EFSYS_PROBE(mcast_mismatch); flags |= EFX_ADDR_MISMATCH; } } else { flags |= EFX_PKT_UNICAST; } /* * The packet parser in Siena can abort parsing packets under * certain error conditions, setting the PKT_NOT_PARSED bit * (which clears PKT_OK). If this is set, then don't trust * the PKT_TYPE field. 
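 *
 * The code below records that condition as EFX_CHECK_VLAN, and then
 * tests it with the (~flags & bit) idiom: for a single-bit mask this
 * is non-zero exactly when the bit is still clear. For example:
 */

#if 0	/* illustrative sketch of the (~flags & bit) test */
#include <assert.h>

static void
bit_clear_test_example(void)
{
	uint16_t flags = 0;

	assert((~flags & EFX_CHECK_VLAN) != 0);	/* clear: VLAN check runs */
	flags |= EFX_CHECK_VLAN;
	assert((~flags & EFX_CHECK_VLAN) == 0);	/* set: check is skipped */
}
#endif

/*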
*/ if (!ok) { uint32_t parse_err; parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED); if (parse_err != 0) flags |= EFX_CHECK_VLAN; } if (~flags & EFX_CHECK_VLAN) { uint32_t pkt_type; pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE); if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN) flags |= EFX_PKT_VLAN_TAGGED; } EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, uint32_t, size, uint16_t, flags); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, id, size, flags); return (should_abort); } static __checkReturn boolean_t siena_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) { id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR); label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0) EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL); EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED); return (B_FALSE); } static __checkReturn boolean_t siena_ev_global( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { _NOTE(ARGUNUSED(eqp, eecp, arg)) EFX_EV_QSTAT_INCR(eep, EV_GLOBAL); return (B_FALSE); } static __checkReturn boolean_t siena_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) { case FSE_AZ_TX_DESCQ_FLS_DONE_EV: { uint32_t txq_index; EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case FSE_AZ_RX_DESCQ_FLS_DONE_EV: { uint32_t rxq_index; uint32_t failed; rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL); if (failed) { EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED); EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index); should_abort = eecp->eec_rxq_flush_failed(arg, rxq_index); } else { EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); } break; } case FSE_AZ_EVQ_INIT_DONE_EV: EFSYS_ASSERT(eecp->eec_initialized != NULL); should_abort = eecp->eec_initialized(arg); break; case FSE_AZ_EVQ_NOT_EN_EV: EFSYS_PROBE(evq_not_en); break; case 
FSE_AZ_SRM_UPD_DONE_EV: { uint32_t code; EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE); code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_sram != NULL); should_abort = eecp->eec_sram(arg, code); break; } case FSE_AZ_WAKE_UP_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_wake_up != NULL); should_abort = eecp->eec_wake_up(arg, id); break; } case FSE_AZ_TX_PKT_NON_TCP_UDP: EFSYS_PROBE(tx_pkt_non_tcp_udp); break; case FSE_AZ_TIMER_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_timer != NULL); should_abort = eecp->eec_timer(arg, id); break; } case FSE_AZ_RX_DSC_ERROR_EV: EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR); EFSYS_PROBE(rx_dsc_error); EFSYS_ASSERT(eecp->eec_exception != NULL); should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_RX_DSC_ERROR, 0); break; case FSE_AZ_TX_DSC_ERROR_EV: EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR); EFSYS_PROBE(tx_dsc_error); EFSYS_ASSERT(eecp->eec_exception != NULL); should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_TX_DSC_ERROR, 0); break; default: break; } return (should_abort); } static __checkReturn boolean_t siena_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t data; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0); if (data >= ((uint32_t)1 << 16)) { EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); return (B_TRUE); } EFSYS_ASSERT(eecp->eec_software != NULL); should_abort = eecp->eec_software(arg, (uint16_t)data); return (should_abort); } #if EFSYS_OPT_MCDI static __checkReturn boolean_t siena_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; - unsigned code; + unsigned int code; boolean_t should_abort = B_FALSE; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); if (enp->en_family != EFX_FAMILY_SIENA) goto out; EFSYS_ASSERT(eecp->eec_link_change != NULL); EFSYS_ASSERT(eecp->eec_exception != NULL); #if EFSYS_OPT_MON_STATS EFSYS_ASSERT(eecp->eec_monitor != NULL); #endif EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); switch (code) { case MCDI_EVENT_CODE_BADSSERT: efx_mcdi_ev_death(enp, EINTR); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(enp, MCDI_EV_FIELD(eqp, CMDDONE_SEQ), MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); break; case MCDI_EVENT_CODE_LINKCHANGE: { efx_link_mode_t link_mode; siena_phy_link_ev(enp, eqp, &link_mode); should_abort = eecp->eec_link_change(arg, link_mode); break; } case MCDI_EVENT_CODE_SENSOREVT: { #if EFSYS_OPT_MON_STATS efx_mon_stat_t id; efx_mon_stat_value_t value; efx_rc_t rc; if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) should_abort = eecp->eec_monitor(arg, id, value); else if (rc == ENOTSUP) { should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_SENSOREVT, MCDI_EV_FIELD(eqp, DATA)); } else EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ #else should_abort = B_FALSE; #endif break; } case MCDI_EVENT_CODE_SCHEDERR: /* Informational only */ break; case MCDI_EVENT_CODE_REBOOT: efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif 
break; case MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } default: EFSYS_PROBE1(mc_pcol_error, int, code); break; } out: return (should_abort); } #endif /* EFSYS_OPT_MCDI */ static __checkReturn efx_rc_t siena_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; rptr = count & eep->ee_mask; EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); return (0); } #define EFX_EV_BATCH 8 static void siena_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_qword_t ev[EFX_EV_BATCH]; unsigned int batch; unsigned int total; unsigned int count; unsigned int index; size_t offset; EFSYS_ASSERT(countp != NULL); EFSYS_ASSERT(eecp != NULL); count = *countp; do { /* Read up until the end of the batch period */ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1)); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); for (total = 0; total < batch; ++total) { EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total])); if (!EFX_EV_PRESENT(ev[total])) break; EFSYS_PROBE3(event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0)); offset += sizeof (efx_qword_t); } #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1) /* * Prefetch the next batch when we get within PREFETCH_PERIOD * of a completed batch. If the batch is smaller, then prefetch * immediately. 
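 *
 * The batch arithmetic above aligns every read burst to an
 * EFX_EV_BATCH (8 entry) boundary of the ring, so a single burst never
 * straddles the point where the queue index wraps. A worked
 * illustration (the 512-entry queue size is an arbitrary example):
 */

#if 0	/* illustrative sketch of the ring arithmetic */
#include <assert.h>

static void
batch_math_example(void)
{
	unsigned int count = 13;	/* events processed so far */
	unsigned int mask = 511;	/* 512-entry queue */

	/* 13 & 7 == 5 entries into this batch: read at most 3 more */
	assert(EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1)) == 3);

	/* byte offset of the next unread event within the ring */
	assert((count & mask) * sizeof (efx_qword_t) ==
	    13 * sizeof (efx_qword_t));
}
#endif

/*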
*/ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD) EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); #endif /* EFSYS_OPT_EV_PREFETCH */ /* Process the batch of events */ for (index = 0; index < total; ++index) { boolean_t should_abort; uint32_t code; #if EFSYS_OPT_EV_PREFETCH /* Prefetch if we've now reached the batch period */ if (total == batch && index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) { offset = (count + batch) & eep->ee_mask; offset *= sizeof (efx_qword_t); EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); } #endif /* EFSYS_OPT_EV_PREFETCH */ EFX_EV_QSTAT_INCR(eep, EV_ALL); code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE); switch (code) { case FSE_AZ_EV_CODE_RX_EV: should_abort = eep->ee_rx(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_TX_EV: should_abort = eep->ee_tx(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_DRIVER_EV: should_abort = eep->ee_driver(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_DRV_GEN_EV: should_abort = eep->ee_drv_gen(eep, &(ev[index]), eecp, arg); break; #if EFSYS_OPT_MCDI case FSE_AZ_EV_CODE_MCDI_EVRESPONSE: should_abort = eep->ee_mcdi(eep, &(ev[index]), eecp, arg); break; #endif case FSE_AZ_EV_CODE_GLOBAL_EV: if (eep->ee_global) { should_abort = eep->ee_global(eep, &(ev[index]), eecp, arg); break; } /* else fallthrough */ default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(ev[index], EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(ev[index], EFX_DWORD_0)); EFSYS_ASSERT(eecp->eec_exception != NULL); (void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code); should_abort = B_TRUE; } if (should_abort) { /* Ignore subsequent events */ total = index + 1; break; } } /* * Now that the hardware has most likely moved onto dma'ing * into the next cache line, clear the processed events. Take * care to only clear out events that we've processed */ EFX_SET_QWORD(ev[0]); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); for (index = 0; index < total; ++index) { EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0])); offset += sizeof (efx_qword_t); } count += total; } while (total == batch); *countp = count; } static void siena_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t ev; efx_oword_t oword; EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV, FSF_AZ_EV_DATA_DW0, (uint32_t)data); EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index, EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0), EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1)); EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword); } static __checkReturn efx_rc_t siena_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); unsigned int locked; efx_dword_t dword; efx_rc_t rc; if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, FRF_CZ_TC_TIMER_VAL, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail2; EFSYS_ASSERT(ticks > 0); EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, FRF_CZ_TC_TIMER_VAL, ticks - 1); } locked = (eep->ee_index == 0) ? 
1 : 0; EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0, eep->ee_index, &dword, locked); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t siena_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint32_t us, __in efx_evq_t *eep) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t size; efx_oword_t oword; efx_rc_t rc; _NOTE(ARGUNUSED(esmp)) EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail2; } #if EFSYS_OPT_RX_SCALE if (enp->en_intr.ei_type == EFX_INTR_LINE && index >= EFX_MAXRSS_LEGACY) { rc = EINVAL; goto fail3; } #endif for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS); size++) if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail4; } /* Set up the handler table */ eep->ee_rx = siena_ev_rx; eep->ee_tx = siena_ev_tx; eep->ee_driver = siena_ev_driver; eep->ee_global = siena_ev_global; eep->ee_drv_gen = siena_ev_drv_gen; #if EFSYS_OPT_MCDI eep->ee_mcdi = siena_ev_mcdi; #endif /* EFSYS_OPT_MCDI */ /* Set up the new event queue */ EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE); EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size, FRF_AZ_EVQ_BUF_BASE_ID, id); EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE); /* Set initial interrupt moderation */ siena_ev_qmoderate(eep, us); return (0); fail4: EFSYS_PROBE(fail4); #if EFSYS_OPT_RX_SCALE fail3: EFSYS_PROBE(fail3); #endif fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */ static const char * const __efx_ev_qstat_name[] = { "all", "rx", "rx_ok", "rx_frm_trunc", "rx_tobe_disc", "rx_pause_frm_err", "rx_buf_owner_id_err", "rx_ipv4_hdr_chksum_err", "rx_tcp_udp_chksum_err", "rx_eth_crc_err", "rx_ip_frag_err", "rx_mcast_pkt", "rx_mcast_hash_match", "rx_tcp_ipv4", "rx_tcp_ipv6", "rx_udp_ipv4", "rx_udp_ipv6", "rx_other_ipv4", "rx_other_ipv6", "rx_non_ip", "rx_batch", "tx", "tx_wq_ff_full", "tx_pkt_err", "tx_pkt_too_big", "tx_unexpected", "global", "global_mnt", "driver", "driver_srm_upd_done", "driver_tx_descq_fls_done", "driver_rx_descq_fls_done", "driver_rx_descq_fls_failed", "driver_rx_dsc_error", "driver_tx_dsc_error", "drv_gen", "mcdi_response", }; /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */ const char * efx_ev_qstat_name( __in efx_nic_t *enp, __in unsigned int id) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, EV_NQSTATS); return (__efx_ev_qstat_name[id]); } #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_SIENA #if EFSYS_OPT_QSTATS static void siena_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < EV_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, eep->ee_stat[id]); eep->ee_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ static void siena_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; efx_oword_t oword; /* Purge event queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, 
FR_AZ_EVQ_PTR_TBL, eep->ee_index, &oword, B_TRUE); EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE); } static void siena_ev_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_SIENA */ Index: head/sys/dev/sfxge/common/efx_filter.c =================================================================== --- head/sys/dev/sfxge/common/efx_filter.c (revision 310692) +++ head/sys/dev/sfxge/common/efx_filter.c (revision 310693) @@ -1,1402 +1,1402 @@ /*- * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_FILTER #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_filter_init( __in efx_nic_t *enp); static void siena_filter_fini( __in efx_nic_t *enp); static __checkReturn efx_rc_t siena_filter_restore( __in efx_nic_t *enp); static __checkReturn efx_rc_t siena_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace); static __checkReturn efx_rc_t siena_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); static __checkReturn efx_rc_t siena_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length); #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static const efx_filter_ops_t __efx_filter_siena_ops = { siena_filter_init, /* efo_init */ siena_filter_fini, /* efo_fini */ siena_filter_restore, /* efo_restore */ siena_filter_add, /* efo_add */ siena_filter_delete, /* efo_delete */ siena_filter_supported_filters, /* efo_supported_filters */ NULL, /* efo_reconfigure */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static const efx_filter_ops_t __efx_filter_ef10_ops = { ef10_filter_init, /* efo_init */ ef10_filter_fini, /* efo_fini */ ef10_filter_restore, /* efo_restore */ ef10_filter_add, /* efo_add */ ef10_filter_delete, /* efo_delete */ ef10_filter_supported_filters, /* efo_supported_filters */ ef10_filter_reconfigure, /* efo_reconfigure */ }; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_filter_insert( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { const efx_filter_ops_t *efop = enp->en_efop; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX); return (efop->efo_add(enp, spec, B_FALSE)); } __checkReturn efx_rc_t efx_filter_remove( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { const efx_filter_ops_t *efop = enp->en_efop; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX); #if EFSYS_OPT_RX_SCALE spec->efs_rss_context = enp->en_rss_context; #endif return (efop->efo_delete(enp, spec)); } __checkReturn efx_rc_t efx_filter_restore( __in efx_nic_t *enp) { efx_rc_t rc; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); if ((rc = enp->en_efop->efo_restore(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_filter_init( __in efx_nic_t *enp) { const efx_filter_ops_t *efop; efx_rc_t rc; /* Check that efx_filter_spec_t is 64 bytes. 
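 *
 * For context, a sketch of how a client strings the generic entry
 * points in this file together (the spec helpers referenced are
 * defined further down; the function name and the erp/host/port
 * values are placeholders):
 */

#if 0	/* illustrative sketch, not functional driver code */
static efx_rc_t
hypothetical_add_local_tcp_filter(efx_nic_t *enp, efx_rxq_t *erp,
	uint32_t host, uint16_t port)
{
	efx_filter_spec_t spec;
	efx_rc_t rc;

	efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, 0, erp);
	if ((rc = efx_filter_spec_set_ipv4_local(&spec, EFX_IPPROTO_TCP,
	    host, port)) != 0)
		return (rc);

	return (efx_filter_insert(enp, &spec));
}
#endif

/*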
*/ EFX_STATIC_ASSERT(sizeof (efx_filter_spec_t) == 64); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER)); switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: efop = &__efx_filter_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: efop = &__efx_filter_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: efop = &__efx_filter_ef10_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } if ((rc = efop->efo_init(enp)) != 0) goto fail2; enp->en_efop = efop; enp->en_mod_flags |= EFX_MOD_FILTER; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_efop = NULL; enp->en_mod_flags &= ~EFX_MOD_FILTER; return (rc); } void efx_filter_fini( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); enp->en_efop->efo_fini(enp); enp->en_efop = NULL; enp->en_mod_flags &= ~EFX_MOD_FILTER; } __checkReturn efx_rc_t efx_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length) { efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); EFSYS_ASSERT(enp->en_efop->efo_supported_filters != NULL); if ((rc = enp->en_efop->efo_supported_filters(enp, list, length)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count) { efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); if (enp->en_efop->efo_reconfigure != NULL) { if ((rc = enp->en_efop->efo_reconfigure(enp, mac_addr, all_unicst, mulcst, all_mulcst, brdcst, addrs, count)) != 0) goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_filter_spec_init_rx( __out efx_filter_spec_t *spec, __in efx_filter_priority_t priority, __in efx_filter_flag_t flags, __in efx_rxq_t *erp) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(erp, !=, NULL); EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER)) == 0); memset(spec, 0, sizeof (*spec)); spec->efs_priority = priority; spec->efs_flags = EFX_FILTER_FLAG_RX | flags; spec->efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT; spec->efs_dmaq_id = (uint16_t)erp->er_index; } void efx_filter_spec_init_tx( __out efx_filter_spec_t *spec, __in efx_txq_t *etp) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(etp, !=, NULL); memset(spec, 0, sizeof (*spec)); spec->efs_priority = EFX_FILTER_PRI_REQUIRED; spec->efs_flags = EFX_FILTER_FLAG_TX; spec->efs_dmaq_id = (uint16_t)etp->et_index; } /* * Specify IPv4 host, transport protocol and port in a filter specification */ __checkReturn efx_rc_t efx_filter_spec_set_ipv4_local( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t host, __in uint16_t port) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE | 
EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; spec->efs_ether_type = EFX_ETHER_TYPE_IPV4; spec->efs_ip_proto = proto; spec->efs_loc_host.eo_u32[0] = host; spec->efs_loc_port = port; return (0); } /* * Specify IPv4 hosts, transport protocol and ports in a filter specification */ __checkReturn efx_rc_t efx_filter_spec_set_ipv4_full( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t lhost, __in uint16_t lport, __in uint32_t rhost, __in uint16_t rport) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; spec->efs_ether_type = EFX_ETHER_TYPE_IPV4; spec->efs_ip_proto = proto; spec->efs_loc_host.eo_u32[0] = lhost; spec->efs_loc_port = lport; spec->efs_rem_host.eo_u32[0] = rhost; spec->efs_rem_port = rport; return (0); } /* * Specify local Ethernet address and/or VID in filter specification */ __checkReturn efx_rc_t efx_filter_spec_set_eth_local( __inout efx_filter_spec_t *spec, __in uint16_t vid, __in const uint8_t *addr) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(addr, !=, NULL); if (vid == EFX_FILTER_SPEC_VID_UNSPEC && addr == NULL) return (EINVAL); if (vid != EFX_FILTER_SPEC_VID_UNSPEC) { spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; spec->efs_outer_vid = vid; } if (addr != NULL) { spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC; memcpy(spec->efs_loc_mac, addr, EFX_MAC_ADDR_LEN); } return (0); } /* * Specify matching otherwise-unmatched unicast in a filter specification */ __checkReturn efx_rc_t efx_filter_spec_set_uc_def( __inout efx_filter_spec_t *spec) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; return (0); } /* * Specify matching otherwise-unmatched multicast in a filter specification */ __checkReturn efx_rc_t efx_filter_spec_set_mc_def( __inout efx_filter_spec_t *spec) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; spec->efs_loc_mac[0] = 1; return (0); } #if EFSYS_OPT_SIENA /* * "Fudge factors" - difference between programmed value and actual depth. * Due to pipelined implementation we need to program H/W with a value that * is larger than the hop limit we want. */ #define FILTER_CTL_SRCH_FUDGE_WILD 3 #define FILTER_CTL_SRCH_FUDGE_FULL 1 /* * Hard maximum hop limit. Hardware will time-out beyond 200-something. * We also need to avoid infinite loops in efx_filter_search() when the * table is full. 
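 *
 * The hard cap matters because the probe stride is deliberately odd
 * (siena_filter_tbl_increment() below returns key * 2 - 1), and an odd
 * stride is coprime to the power-of-two table size, so a search of a
 * full table would otherwise revisit every slot forever. A sketch of
 * the walk, using the helpers defined later in this file:
 */

#if 0	/* illustrative sketch of the probe sequence */
static void
probe_walk_example(void)
{
	uint32_t key = 0xdeadbeef;	/* arbitrary n-tuple key */
	unsigned int size = 8;		/* table sizes are powers of two */
	unsigned int idx = siena_filter_tbl_hash(key) & (size - 1);
	unsigned int incr = siena_filter_tbl_increment(key);
	unsigned int depth;

	/* Odd incr visits all 8 slots before the sequence repeats. */
	for (depth = 1; depth <= size; depth++)
		idx = (idx + incr) & (size - 1);

	(void) idx;
}
#endif

/*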
*/ #define FILTER_CTL_SRCH_MAX 200 static __checkReturn efx_rc_t siena_filter_spec_from_gen_spec( __out siena_filter_spec_t *sf_spec, __in efx_filter_spec_t *gen_spec) { efx_rc_t rc; boolean_t is_full = B_FALSE; if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) EFSYS_ASSERT3U(gen_spec->efs_flags, ==, EFX_FILTER_FLAG_TX); else EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX); /* Falconsiena only has one RSS context */ if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->efs_rss_context != 0) { rc = EINVAL; goto fail1; } sf_spec->sfs_flags = gen_spec->efs_flags; sf_spec->sfs_dmaq_id = gen_spec->efs_dmaq_id; switch (gen_spec->efs_match_flags) { case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT: is_full = B_TRUE; /* Fall through */ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT: { uint32_t rhost, host1, host2; uint16_t rport, port1, port2; if (gen_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4) { rc = ENOTSUP; goto fail2; } if (gen_spec->efs_loc_port == 0 || (is_full && gen_spec->efs_rem_port == 0)) { rc = EINVAL; goto fail3; } switch (gen_spec->efs_ip_proto) { case EFX_IPPROTO_TCP: if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { sf_spec->sfs_type = (is_full ? EFX_SIENA_FILTER_TX_TCP_FULL : EFX_SIENA_FILTER_TX_TCP_WILD); } else { sf_spec->sfs_type = (is_full ? EFX_SIENA_FILTER_RX_TCP_FULL : EFX_SIENA_FILTER_RX_TCP_WILD); } break; case EFX_IPPROTO_UDP: if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { sf_spec->sfs_type = (is_full ? EFX_SIENA_FILTER_TX_UDP_FULL : EFX_SIENA_FILTER_TX_UDP_WILD); } else { sf_spec->sfs_type = (is_full ? EFX_SIENA_FILTER_RX_UDP_FULL : EFX_SIENA_FILTER_RX_UDP_WILD); } break; default: rc = ENOTSUP; goto fail4; } /* * The filter is constructed in terms of source and destination, * with the odd wrinkle that the ports are swapped in a UDP * wildcard filter. We need to convert from local and remote * addresses (zero for a wildcard). */ rhost = is_full ? gen_spec->efs_rem_host.eo_u32[0] : 0; rport = is_full ? gen_spec->efs_rem_port : 0; if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { host1 = gen_spec->efs_loc_host.eo_u32[0]; host2 = rhost; } else { host1 = rhost; host2 = gen_spec->efs_loc_host.eo_u32[0]; } if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { if (sf_spec->sfs_type == EFX_SIENA_FILTER_TX_UDP_WILD) { port1 = rport; port2 = gen_spec->efs_loc_port; } else { port1 = gen_spec->efs_loc_port; port2 = rport; } } else { if (sf_spec->sfs_type == EFX_SIENA_FILTER_RX_UDP_WILD) { port1 = gen_spec->efs_loc_port; port2 = rport; } else { port1 = rport; port2 = gen_spec->efs_loc_port; } } sf_spec->sfs_dword[0] = (host1 << 16) | port1; sf_spec->sfs_dword[1] = (port2 << 16) | (host1 >> 16); sf_spec->sfs_dword[2] = host2; break; } case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: is_full = B_TRUE; /* Fall through */ case EFX_FILTER_MATCH_LOC_MAC: if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { sf_spec->sfs_type = (is_full ? EFX_SIENA_FILTER_TX_MAC_FULL : EFX_SIENA_FILTER_TX_MAC_WILD); } else { sf_spec->sfs_type = (is_full ? EFX_SIENA_FILTER_RX_MAC_FULL : EFX_SIENA_FILTER_RX_MAC_WILD); } sf_spec->sfs_dword[0] = is_full ? 
gen_spec->efs_outer_vid : 0; sf_spec->sfs_dword[1] = gen_spec->efs_loc_mac[2] << 24 | gen_spec->efs_loc_mac[3] << 16 | gen_spec->efs_loc_mac[4] << 8 | gen_spec->efs_loc_mac[5]; sf_spec->sfs_dword[2] = gen_spec->efs_loc_mac[0] << 8 | gen_spec->efs_loc_mac[1]; break; default: EFSYS_ASSERT(B_FALSE); rc = ENOTSUP; goto fail5; } return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit * key derived from the n-tuple. */ static uint16_t siena_filter_tbl_hash( __in uint32_t key) { uint16_t tmp; /* First 16 rounds */ tmp = 0x1fff ^ (uint16_t)(key >> 16); tmp = tmp ^ tmp >> 3 ^ tmp >> 6; tmp = tmp ^ tmp >> 9; /* Last 16 rounds */ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff); tmp = tmp ^ tmp >> 3 ^ tmp >> 6; tmp = tmp ^ tmp >> 9; return (tmp); } /* * To allow for hash collisions, filter search continues at these * increments from the first possible entry selected by the hash. */ static uint16_t siena_filter_tbl_increment( __in uint32_t key) { return ((uint16_t)(key * 2 - 1)); } static __checkReturn boolean_t siena_filter_test_used( __in siena_filter_tbl_t *sftp, __in unsigned int index) { EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL); return ((sftp->sft_bitmap[index / 32] & (1 << (index % 32))) != 0); } static void siena_filter_set_used( __in siena_filter_tbl_t *sftp, __in unsigned int index) { EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL); sftp->sft_bitmap[index / 32] |= (1 << (index % 32)); ++sftp->sft_used; } static void siena_filter_clear_used( __in siena_filter_tbl_t *sftp, __in unsigned int index) { EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL); sftp->sft_bitmap[index / 32] &= ~(1 << (index % 32)); --sftp->sft_used; EFSYS_ASSERT3U(sftp->sft_used, >=, 0); } static siena_filter_tbl_id_t siena_filter_tbl_id( __in siena_filter_type_t type) { siena_filter_tbl_id_t tbl_id; switch (type) { case EFX_SIENA_FILTER_RX_TCP_FULL: case EFX_SIENA_FILTER_RX_TCP_WILD: case EFX_SIENA_FILTER_RX_UDP_FULL: case EFX_SIENA_FILTER_RX_UDP_WILD: tbl_id = EFX_SIENA_FILTER_TBL_RX_IP; break; case EFX_SIENA_FILTER_RX_MAC_FULL: case EFX_SIENA_FILTER_RX_MAC_WILD: tbl_id = EFX_SIENA_FILTER_TBL_RX_MAC; break; case EFX_SIENA_FILTER_TX_TCP_FULL: case EFX_SIENA_FILTER_TX_TCP_WILD: case EFX_SIENA_FILTER_TX_UDP_FULL: case EFX_SIENA_FILTER_TX_UDP_WILD: tbl_id = EFX_SIENA_FILTER_TBL_TX_IP; break; case EFX_SIENA_FILTER_TX_MAC_FULL: case EFX_SIENA_FILTER_TX_MAC_WILD: tbl_id = EFX_SIENA_FILTER_TBL_TX_MAC; break; default: EFSYS_ASSERT(B_FALSE); tbl_id = EFX_SIENA_FILTER_NTBLS; break; } return (tbl_id); } static void siena_filter_reset_search_depth( __inout siena_filter_t *sfp, __in siena_filter_tbl_id_t tbl_id) { switch (tbl_id) { case EFX_SIENA_FILTER_TBL_RX_IP: sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] = 0; sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] = 0; sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] = 0; sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] = 0; break; case EFX_SIENA_FILTER_TBL_RX_MAC: sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] = 0; sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] = 0; break; case EFX_SIENA_FILTER_TBL_TX_IP: sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] = 0; sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] = 0; sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] = 0; sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] = 0; break; case EFX_SIENA_FILTER_TBL_TX_MAC: sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] = 0; 
sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] = 0; break; default: EFSYS_ASSERT(B_FALSE); break; } } static void siena_filter_push_rx_limits( __in efx_nic_t *enp) { siena_filter_t *sfp = enp->en_filter.ef_siena_filter; efx_oword_t oword; EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT, sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT, sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT, sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT, sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC].sft_size) { EFX_SET_OWORD_FIELD(oword, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); } EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); } static void siena_filter_push_tx_limits( __in efx_nic_t *enp) { siena_filter_t *sfp = enp->en_filter.ef_siena_filter; efx_oword_t oword; EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword); if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP].sft_size != 0) { EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE, sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE, sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE, sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE, sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); } if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC].sft_size != 0) { EFX_SET_OWORD_FIELD( oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); } EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword); } /* Build a filter entry and return its n-tuple key. */ static __checkReturn uint32_t siena_filter_build( __out efx_oword_t *filter, __in siena_filter_spec_t *spec) { uint32_t dword3; uint32_t key; uint8_t type = spec->sfs_type; uint32_t flags = spec->sfs_flags; switch (siena_filter_tbl_id(type)) { case EFX_SIENA_FILTER_TBL_RX_IP: { boolean_t is_udp = (type == EFX_SIENA_FILTER_RX_UDP_FULL || type == EFX_SIENA_FILTER_RX_UDP_WILD); EFX_POPULATE_OWORD_7(*filter, FRF_BZ_RSS_EN, (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0, FRF_BZ_SCATTER_EN, (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0, FRF_AZ_TCP_UDP, is_udp, FRF_AZ_RXQ_ID, spec->sfs_dmaq_id, EFX_DWORD_2, spec->sfs_dword[2], EFX_DWORD_1, spec->sfs_dword[1], EFX_DWORD_0, spec->sfs_dword[0]); dword3 = is_udp; break; } case EFX_SIENA_FILTER_TBL_RX_MAC: { boolean_t is_wild = (type == EFX_SIENA_FILTER_RX_MAC_WILD); EFX_POPULATE_OWORD_7(*filter, FRF_CZ_RMFT_RSS_EN, (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0, FRF_CZ_RMFT_SCATTER_EN, (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 
1 : 0, FRF_CZ_RMFT_RXQ_ID, spec->sfs_dmaq_id, FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, FRF_CZ_RMFT_DEST_MAC_DW1, spec->sfs_dword[2], FRF_CZ_RMFT_DEST_MAC_DW0, spec->sfs_dword[1], FRF_CZ_RMFT_VLAN_ID, spec->sfs_dword[0]); dword3 = is_wild; break; } case EFX_SIENA_FILTER_TBL_TX_IP: { boolean_t is_udp = (type == EFX_SIENA_FILTER_TX_UDP_FULL || type == EFX_SIENA_FILTER_TX_UDP_WILD); EFX_POPULATE_OWORD_5(*filter, FRF_CZ_TIFT_TCP_UDP, is_udp, FRF_CZ_TIFT_TXQ_ID, spec->sfs_dmaq_id, EFX_DWORD_2, spec->sfs_dword[2], EFX_DWORD_1, spec->sfs_dword[1], EFX_DWORD_0, spec->sfs_dword[0]); dword3 = is_udp | spec->sfs_dmaq_id << 1; break; } case EFX_SIENA_FILTER_TBL_TX_MAC: { boolean_t is_wild = (type == EFX_SIENA_FILTER_TX_MAC_WILD); EFX_POPULATE_OWORD_5(*filter, FRF_CZ_TMFT_TXQ_ID, spec->sfs_dmaq_id, FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, FRF_CZ_TMFT_SRC_MAC_DW1, spec->sfs_dword[2], FRF_CZ_TMFT_SRC_MAC_DW0, spec->sfs_dword[1], FRF_CZ_TMFT_VLAN_ID, spec->sfs_dword[0]); dword3 = is_wild | spec->sfs_dmaq_id << 1; break; } default: EFSYS_ASSERT(B_FALSE); return (0); } key = spec->sfs_dword[0] ^ spec->sfs_dword[1] ^ spec->sfs_dword[2] ^ dword3; return (key); } static __checkReturn efx_rc_t siena_filter_push_entry( __inout efx_nic_t *enp, __in siena_filter_type_t type, __in int index, __in efx_oword_t *eop) { efx_rc_t rc; switch (type) { case EFX_SIENA_FILTER_RX_TCP_FULL: case EFX_SIENA_FILTER_RX_TCP_WILD: case EFX_SIENA_FILTER_RX_UDP_FULL: case EFX_SIENA_FILTER_RX_UDP_WILD: EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index, eop, B_TRUE); break; case EFX_SIENA_FILTER_RX_MAC_FULL: case EFX_SIENA_FILTER_RX_MAC_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index, eop, B_TRUE); break; case EFX_SIENA_FILTER_TX_TCP_FULL: case EFX_SIENA_FILTER_TX_TCP_WILD: case EFX_SIENA_FILTER_TX_UDP_FULL: case EFX_SIENA_FILTER_TX_UDP_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index, eop, B_TRUE); break; case EFX_SIENA_FILTER_TX_MAC_FULL: case EFX_SIENA_FILTER_TX_MAC_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index, eop, B_TRUE); break; default: EFSYS_ASSERT(B_FALSE); rc = ENOTSUP; goto fail1; } return (0); fail1: return (rc); } static __checkReturn boolean_t siena_filter_equal( __in const siena_filter_spec_t *left, __in const siena_filter_spec_t *right) { siena_filter_tbl_id_t tbl_id; tbl_id = siena_filter_tbl_id(left->sfs_type); if (left->sfs_type != right->sfs_type) return (B_FALSE); if (memcmp(left->sfs_dword, right->sfs_dword, sizeof (left->sfs_dword))) return (B_FALSE); if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP || tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) && left->sfs_dmaq_id != right->sfs_dmaq_id) return (B_FALSE); return (B_TRUE); } static __checkReturn efx_rc_t siena_filter_search( __in siena_filter_tbl_t *sftp, __in siena_filter_spec_t *spec, __in uint32_t key, __in boolean_t for_insert, __out int *filter_index, __out unsigned int *depth_required) { - unsigned hash, incr, filter_idx, depth; + unsigned int hash, incr, filter_idx, depth; hash = siena_filter_tbl_hash(key); incr = siena_filter_tbl_increment(key); filter_idx = hash & (sftp->sft_size - 1); depth = 1; for (;;) { /* * Return success if entry is used and matches this spec * or entry is unused and we are trying to insert. */ if (siena_filter_test_used(sftp, filter_idx) ? siena_filter_equal(spec, &sftp->sft_spec[filter_idx]) : for_insert) { *filter_index = filter_idx; *depth_required = depth; return (0); } /* Return failure if we reached the maximum search depth */ if (depth == FILTER_CTL_SRCH_MAX) return (for_insert ? 
static	__checkReturn	efx_rc_t
siena_filter_push_entry(
	__inout		efx_nic_t *enp,
	__in		siena_filter_type_t type,
	__in		int index,
	__in		efx_oword_t *eop)
{
	efx_rc_t rc;

	switch (type) {
	case EFX_SIENA_FILTER_RX_TCP_FULL:
	case EFX_SIENA_FILTER_RX_TCP_WILD:
	case EFX_SIENA_FILTER_RX_UDP_FULL:
	case EFX_SIENA_FILTER_RX_UDP_WILD:
		EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index,
		    eop, B_TRUE);
		break;

	case EFX_SIENA_FILTER_RX_MAC_FULL:
	case EFX_SIENA_FILTER_RX_MAC_WILD:
		EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index,
		    eop, B_TRUE);
		break;

	case EFX_SIENA_FILTER_TX_TCP_FULL:
	case EFX_SIENA_FILTER_TX_TCP_WILD:
	case EFX_SIENA_FILTER_TX_UDP_FULL:
	case EFX_SIENA_FILTER_TX_UDP_WILD:
		EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index,
		    eop, B_TRUE);
		break;

	case EFX_SIENA_FILTER_TX_MAC_FULL:
	case EFX_SIENA_FILTER_TX_MAC_WILD:
		EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index,
		    eop, B_TRUE);
		break;

	default:
		EFSYS_ASSERT(B_FALSE);
		rc = ENOTSUP;
		goto fail1;
	}
	return (0);

fail1:
	return (rc);
}

static	__checkReturn	boolean_t
siena_filter_equal(
	__in		const siena_filter_spec_t *left,
	__in		const siena_filter_spec_t *right)
{
	siena_filter_tbl_id_t tbl_id;

	tbl_id = siena_filter_tbl_id(left->sfs_type);

	if (left->sfs_type != right->sfs_type)
		return (B_FALSE);

	if (memcmp(left->sfs_dword, right->sfs_dword,
	    sizeof (left->sfs_dword)))
		return (B_FALSE);

	if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
	    tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) &&
	    left->sfs_dmaq_id != right->sfs_dmaq_id)
		return (B_FALSE);

	return (B_TRUE);
}

static	__checkReturn	efx_rc_t
siena_filter_search(
	__in		siena_filter_tbl_t *sftp,
	__in		siena_filter_spec_t *spec,
	__in		uint32_t key,
	__in		boolean_t for_insert,
	__out		int *filter_index,
	__out		unsigned int *depth_required)
{
-	unsigned hash, incr, filter_idx, depth;
+	unsigned int hash, incr, filter_idx, depth;

	hash = siena_filter_tbl_hash(key);
	incr = siena_filter_tbl_increment(key);

	filter_idx = hash & (sftp->sft_size - 1);
	depth = 1;

	for (;;) {
		/*
		 * Return success if entry is used and matches this spec
		 * or entry is unused and we are trying to insert.
		 */
		if (siena_filter_test_used(sftp, filter_idx) ?
		    siena_filter_equal(spec, &sftp->sft_spec[filter_idx]) :
		    for_insert) {
			*filter_index = filter_idx;
			*depth_required = depth;
			return (0);
		}

		/* Return failure if we reached the maximum search depth */
		if (depth == FILTER_CTL_SRCH_MAX)
			return (for_insert ? EBUSY : ENOENT);

		filter_idx = (filter_idx + incr) & (sftp->sft_size - 1);
		++depth;
	}
}
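
/*
 * Editorial sketch (invented names, not driver symbols): the search
 * above is open addressing with double hashing.  One hash of the key
 * picks the start slot, a second supplies the probe increment, and the
 * walk gives up after FILTER_CTL_SRCH_MAX positions.  On a power-of-two
 * table an odd increment visits every slot before repeating.  The
 * control flow, compressed:
 */
#define	SKETCH_SRCH_MAX	200	/* illustrative cap, not the driver's value */

static int
probe_sketch(uint32_t key, unsigned int size /* power of two */,
	boolean_t (*slot_usable)(unsigned int))
{
	unsigned int idx = (key * 0x9e3779b1u) & (size - 1); /* toy hash 1 */
	unsigned int incr = ((key >> 16) | 1u);	/* toy hash 2, forced odd */
	unsigned int depth;

	for (depth = 1; depth <= SKETCH_SRCH_MAX; depth++) {
		if (slot_usable(idx))
			return ((int)idx);	/* found at probe `depth' */
		idx = (idx + incr) & (size - 1);
	}
	return (-1);		/* chain too long: EBUSY or ENOENT above */
}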
static			void
siena_filter_clear_entry(
	__in		efx_nic_t *enp,
	__in		siena_filter_tbl_t *sftp,
	__in		int index)
{
	efx_oword_t filter;

	if (siena_filter_test_used(sftp, index)) {
		siena_filter_clear_used(sftp, index);

		EFX_ZERO_OWORD(filter);
		siena_filter_push_entry(enp,
		    sftp->sft_spec[index].sfs_type,
		    index, &filter);

		memset(&sftp->sft_spec[index], 0,
		    sizeof (sftp->sft_spec[0]));
	}
}

			void
siena_filter_tbl_clear(
	__in		efx_nic_t *enp,
	__in		siena_filter_tbl_id_t tbl_id)
{
	siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
	siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
	int index;
	int state;

	EFSYS_LOCK(enp->en_eslp, state);

	for (index = 0; index < sftp->sft_size; ++index) {
		siena_filter_clear_entry(enp, sftp, index);
	}

	if (sftp->sft_used == 0)
		siena_filter_reset_search_depth(sfp, tbl_id);

	EFSYS_UNLOCK(enp->en_eslp, state);
}

static	__checkReturn	efx_rc_t
siena_filter_init(
	__in		efx_nic_t *enp)
{
	siena_filter_t *sfp;
	siena_filter_tbl_t *sftp;
	int tbl_id;
	efx_rc_t rc;

	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (siena_filter_t), sfp);

	if (!sfp) {
		rc = ENOMEM;
		goto fail1;
	}

	enp->en_filter.ef_siena_filter = sfp;

	switch (enp->en_family) {
	case EFX_FAMILY_SIENA:
		sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_IP];
		sftp->sft_size = FR_AZ_RX_FILTER_TBL0_ROWS;

		sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC];
		sftp->sft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;

		sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP];
		sftp->sft_size = FR_CZ_TX_FILTER_TBL0_ROWS;

		sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC];
		sftp->sft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
		break;

	default:
		rc = ENOTSUP;
		goto fail2;
	}

	for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
		unsigned int bitmap_size;

		sftp = &sfp->sf_tbl[tbl_id];
		if (sftp->sft_size == 0)
			continue;

		EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
		    sizeof (uint32_t));
		bitmap_size =
		    (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;

		EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, sftp->sft_bitmap);
		if (!sftp->sft_bitmap) {
			rc = ENOMEM;
			goto fail3;
		}

		EFSYS_KMEM_ALLOC(enp->en_esip,
		    sftp->sft_size * sizeof (*sftp->sft_spec),
		    sftp->sft_spec);
		if (!sftp->sft_spec) {
			rc = ENOMEM;
			goto fail4;
		}
		memset(sftp->sft_spec, 0,
		    sftp->sft_size * sizeof (*sftp->sft_spec));
	}

	return (0);

fail4:
	EFSYS_PROBE(fail4);

fail3:
	EFSYS_PROBE(fail3);

fail2:
	EFSYS_PROBE(fail2);
	siena_filter_fini(enp);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}

static			void
siena_filter_fini(
	__in		efx_nic_t *enp)
{
	siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
	siena_filter_tbl_id_t tbl_id;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);

	if (sfp == NULL)
		return;

	for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
		siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
		unsigned int bitmap_size;

		EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
		    sizeof (uint32_t));
		bitmap_size =
		    (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;

		if (sftp->sft_bitmap != NULL) {
			EFSYS_KMEM_FREE(enp->en_esip, bitmap_size,
			    sftp->sft_bitmap);
			sftp->sft_bitmap = NULL;
		}

		if (sftp->sft_spec != NULL) {
			EFSYS_KMEM_FREE(enp->en_esip, sftp->sft_size *
			    sizeof (*sftp->sft_spec), sftp->sft_spec);
			sftp->sft_spec = NULL;
		}
	}

	EFSYS_KMEM_FREE(enp->en_esip, sizeof (siena_filter_t),
	    enp->en_filter.ef_siena_filter);
}

/* Restore filter state after a reset */
static	__checkReturn	efx_rc_t
siena_filter_restore(
	__in		efx_nic_t *enp)
{
	siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
	siena_filter_tbl_id_t tbl_id;
	siena_filter_tbl_t *sftp;
	siena_filter_spec_t *spec;
	efx_oword_t filter;
	int filter_idx;
	int state;
	efx_rc_t rc;

	EFSYS_LOCK(enp->en_eslp, state);

	for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
		sftp = &sfp->sf_tbl[tbl_id];
		for (filter_idx = 0;
		    filter_idx < sftp->sft_size;
		    filter_idx++) {
			if (!siena_filter_test_used(sftp, filter_idx))
				continue;

			spec = &sftp->sft_spec[filter_idx];
			if ((rc = siena_filter_build(&filter, spec)) != 0)
				goto fail1;
			if ((rc = siena_filter_push_entry(enp,
			    spec->sfs_type, filter_idx, &filter)) != 0)
				goto fail2;
		}
	}

	siena_filter_push_rx_limits(enp);
	siena_filter_push_tx_limits(enp);

	EFSYS_UNLOCK(enp->en_eslp, state);

	return (0);

fail2:
	EFSYS_PROBE(fail2);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	EFSYS_UNLOCK(enp->en_eslp, state);

	return (rc);
}

static	__checkReturn	efx_rc_t
siena_filter_add(
	__in		efx_nic_t *enp,
	__inout		efx_filter_spec_t *spec,
	__in		boolean_t may_replace)
{
	efx_rc_t rc;
	siena_filter_spec_t sf_spec;
	siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
	siena_filter_tbl_id_t tbl_id;
	siena_filter_tbl_t *sftp;
	siena_filter_spec_t *saved_sf_spec;
	efx_oword_t filter;
	int filter_idx;
	unsigned int depth;
	int state;
	uint32_t key;

	EFSYS_ASSERT3P(spec, !=, NULL);

	if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
		goto fail1;

	tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
	sftp = &sfp->sf_tbl[tbl_id];

	if (sftp->sft_size == 0) {
		rc = EINVAL;
		goto fail2;
	}

	key = siena_filter_build(&filter, &sf_spec);

	EFSYS_LOCK(enp->en_eslp, state);

	rc = siena_filter_search(sftp, &sf_spec, key, B_TRUE,
	    &filter_idx, &depth);
	if (rc != 0)
		goto fail3;

	EFSYS_ASSERT3U(filter_idx, <, sftp->sft_size);
	saved_sf_spec = &sftp->sft_spec[filter_idx];

	if (siena_filter_test_used(sftp, filter_idx)) {
		if (may_replace == B_FALSE) {
			rc = EEXIST;
			goto fail4;
		}
	}
	siena_filter_set_used(sftp, filter_idx);
	*saved_sf_spec = sf_spec;

	if (sfp->sf_depth[sf_spec.sfs_type] < depth) {
		sfp->sf_depth[sf_spec.sfs_type] = depth;
		if (tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
		    tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC)
			siena_filter_push_tx_limits(enp);
		else
			siena_filter_push_rx_limits(enp);
	}

	siena_filter_push_entry(enp, sf_spec.sfs_type,
	    filter_idx, &filter);

	EFSYS_UNLOCK(enp->en_eslp, state);

	return (0);

fail4:
	EFSYS_PROBE(fail4);

fail3:
	EFSYS_UNLOCK(enp->en_eslp, state);
	EFSYS_PROBE(fail3);

fail2:
	EFSYS_PROBE(fail2);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}

static	__checkReturn	efx_rc_t
siena_filter_delete(
	__in		efx_nic_t *enp,
	__inout		efx_filter_spec_t *spec)
{
	efx_rc_t rc;
	siena_filter_spec_t sf_spec;
	siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
	siena_filter_tbl_id_t tbl_id;
	siena_filter_tbl_t *sftp;
	efx_oword_t filter;
	int filter_idx;
	unsigned int depth;
	int state;
	uint32_t key;

	EFSYS_ASSERT3P(spec, !=, NULL);

	if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
		goto fail1;

	tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
	sftp = &sfp->sf_tbl[tbl_id];

	key = siena_filter_build(&filter, &sf_spec);

	EFSYS_LOCK(enp->en_eslp, state);

	rc = siena_filter_search(sftp, &sf_spec, key, B_FALSE,
	    &filter_idx, &depth);
	if (rc != 0)
		goto fail2;

	siena_filter_clear_entry(enp, sftp, filter_idx);
	if (sftp->sft_used == 0)
		siena_filter_reset_search_depth(sfp, tbl_id);

	EFSYS_UNLOCK(enp->en_eslp, state);

	return (0);

fail2:
	EFSYS_UNLOCK(enp->en_eslp, state);
	EFSYS_PROBE(fail2);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}
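
/*
 * Editorial sketch: note the asymmetry in the add/delete paths above.
 * siena_filter_add() only ever raises sf_depth[] (re-pushing limits to
 * hardware when the depth grows), while siena_filter_delete() resets a
 * depth only once a whole table drains (sft_used == 0).  Recomputing
 * the true maximum on every delete would mean rescanning the table, so
 * the driver tolerates temporarily over-long search limits instead.
 * Restated with invented names:
 */
static void
depth_policy_sketch(unsigned int *depthp, unsigned int observed_depth,
	unsigned int entries_in_use)
{
	if (observed_depth > *depthp)
		*depthp = observed_depth;	/* grow eagerly */
	if (entries_in_use == 0)
		*depthp = 0;			/* shrink only when empty */
}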
#define	MAX_SUPPORTED	4

static	__checkReturn	efx_rc_t
siena_filter_supported_filters(
	__in		efx_nic_t *enp,
	__out		uint32_t *list,
	__out		size_t *length)
{
	int index = 0;
	uint32_t rx_matches[MAX_SUPPORTED];
	efx_rc_t rc;

	if (list == NULL) {
		rc = EINVAL;
		goto fail1;
	}

	rx_matches[index++] =
	    EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	    EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
	    EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;

	rx_matches[index++] =
	    EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	    EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;

	if (enp->en_features & EFX_FEATURE_MAC_HEADER_FILTERS) {
		rx_matches[index++] =
		    EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC;

		rx_matches[index++] = EFX_FILTER_MATCH_LOC_MAC;
	}

	EFSYS_ASSERT3U(index, <=, MAX_SUPPORTED);

	*length = index;
	/* Copy whole uint32_t match words, not just `index' bytes */
	memcpy(list, rx_matches, *length * sizeof (rx_matches[0]));

	return (0);

fail1:
	return (rc);
}

#undef MAX_SUPPORTED

#endif /* EFSYS_OPT_SIENA */

#endif /* EFSYS_OPT_FILTER */
Index: head/sys/dev/sfxge/common/siena_vpd.c
===================================================================
--- head/sys/dev/sfxge/common/siena_vpd.c	(revision 310692)
+++ head/sys/dev/sfxge/common/siena_vpd.c	(revision 310693)
@@ -1,621 +1,621 @@
/*-
 * Copyright (c) 2009-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_VPD

#if EFSYS_OPT_SIENA

static	__checkReturn			efx_rc_t
siena_vpd_get_static(
	__in				efx_nic_t *enp,
	__in				uint32_t partn,
	__deref_out_bcount_opt(*sizep)	caddr_t *svpdp,
	__out				size_t *sizep)
{
	siena_mc_static_config_hdr_t *scfg;
	caddr_t svpd;
	size_t size;
	uint8_t cksum;
	unsigned int vpd_offset;
	unsigned int vpd_length;
	unsigned int hdr_length;
	unsigned int pos;
	unsigned int region;
	efx_rc_t rc;

	EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 ||
	    partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1);

	/* Allocate sufficient memory for the entire static cfg area */
	if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
		goto fail1;

	EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
	if (scfg == NULL) {
		rc = ENOMEM;
		goto fail2;
	}

	if ((rc = siena_nvram_partn_read(enp, partn, 0,
	    (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
		goto fail3;

	/* Verify the magic number */
	if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
	    SIENA_MC_STATIC_CONFIG_MAGIC) {
		rc = EINVAL;
		goto fail4;
	}

	/* All future versions of the structure must be backwards compatible */
	EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0);

	hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0);
	vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0);
	vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0);

	/* Verify the hdr doesn't overflow the sector size */
	if (hdr_length > size || vpd_offset > size || vpd_length > size ||
	    vpd_length + vpd_offset > size) {
		rc = EINVAL;
		goto fail5;
	}

	/* Read the remainder of scfg + static vpd */
	region = vpd_offset + vpd_length;
	if (region > SIENA_NVRAM_CHUNK) {
		if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
		    (caddr_t)scfg + SIENA_NVRAM_CHUNK,
		    region - SIENA_NVRAM_CHUNK)) != 0)
			goto fail6;
	}

	/* Verify checksum */
	cksum = 0;
	for (pos = 0; pos < hdr_length; pos++)
		cksum += ((uint8_t *)scfg)[pos];
	if (cksum != 0) {
		rc = EINVAL;
		goto fail7;
	}

	if (vpd_length == 0)
		svpd = NULL;
	else {
		/* Copy the vpd data out */
		EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
		if (svpd == NULL) {
			rc = ENOMEM;
			goto fail8;
		}
		memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
	}

	EFSYS_KMEM_FREE(enp->en_esip, size, scfg);

	*svpdp = svpd;
	*sizep = vpd_length;

	return (0);

fail8:
	EFSYS_PROBE(fail8);
fail7:
	EFSYS_PROBE(fail7);
fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);

	EFSYS_KMEM_FREE(enp->en_esip, size, scfg);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
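
/*
 * Editorial sketch: the config-header checksum verified above (and
 * maintained by siena_vpd_write() later in this file) is a plain byte
 * sum over the header that must come to zero modulo 256, so
 * verification is just summation:
 */
static boolean_t
cfg_cksum_ok_sketch(const uint8_t *hdr, size_t hdr_length)
{
	uint8_t cksum = 0;
	size_t pos;

	for (pos = 0; pos < hdr_length; pos++)
		cksum += hdr[pos];
	return (cksum == 0 ? B_TRUE : B_FALSE);	/* non-zero => corrupt */
}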
	__checkReturn		efx_rc_t
siena_vpd_init(
	__in			efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	caddr_t svpd = NULL;
-	unsigned partn;
+	unsigned int partn;
	size_t size = 0;
	efx_rc_t rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	partn = (emip->emi_port == 1)
		? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
		: MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1;

	/*
	 * We need the static VPD sector to present a unified static+dynamic
	 * VPD, that is, basically on every read, write, verify cycle. Since
	 * it should *never* change we can just cache it here.
	 */
	if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0)
		goto fail1;

	if (svpd != NULL && size > 0) {
		if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0)
			goto fail2;
	}

	enp->en_u.siena.enu_svpd = svpd;
	enp->en_u.siena.enu_svpd_length = size;

	return (0);

fail2:
	EFSYS_PROBE(fail2);

	EFSYS_KMEM_FREE(enp->en_esip, size, svpd);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn		efx_rc_t
siena_vpd_size(
	__in			efx_nic_t *enp,
	__out			size_t *sizep)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	uint32_t partn;
	efx_rc_t rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	/*
	 * This function returns the total size the user should allocate
	 * for all VPD operations. We've already cached the static vpd,
	 * so we just need to return an upper bound on the dynamic vpd.
	 * Since the dynamic_config structure can change under our feet,
	 * (as version numbers are inserted), just be safe and return the
	 * total size of the dynamic_config *sector*
	 */
	partn = (emip->emi_port == 1)
		? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
		: MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;

	if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn		efx_rc_t
siena_vpd_read(
	__in			efx_nic_t *enp,
	__out_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	siena_mc_dynamic_config_hdr_t *dcfg = NULL;
	unsigned int vpd_length;
	unsigned int vpd_offset;
	unsigned int dcfg_partn;
	size_t dcfg_size;
	efx_rc_t rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	dcfg_partn = (emip->emi_port == 1)
		? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
		: MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;

	if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
	    B_TRUE, &dcfg, &dcfg_size)) != 0)
		goto fail1;

	vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
	vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);

	if (vpd_length > size) {
		rc = EFAULT;	/* Invalid dcfg: header bigger than sector */
		goto fail2;
	}

	EFSYS_ASSERT3U(vpd_length, <=, size);
	memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length);

	/* Pad data with all-1s, consistent with update operations */
	memset(data + vpd_length, 0xff, size - vpd_length);

	EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);

	return (0);

fail2:
	EFSYS_PROBE(fail2);

	EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
	__checkReturn		efx_rc_t
siena_vpd_verify(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_vpd_tag_t stag;
	efx_vpd_tag_t dtag;
	efx_vpd_keyword_t skey;
	efx_vpd_keyword_t dkey;
	unsigned int scont;
	unsigned int dcont;
	efx_rc_t rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	/*
	 * Strictly you could take the view that dynamic vpd is optional.
	 * Instead, to conform more closely to the read/verify/reinit()
	 * paradigm, we require dynamic vpd. siena_vpd_reinit() will
	 * reinitialize it as required.
	 */
	if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
		goto fail1;

	/*
	 * Verify that there is no duplication between the static and
	 * dynamic cfg sectors.
	 */
	if (enp->en_u.siena.enu_svpd_length == 0)
		goto done;

	dcont = 0;
	_NOTE(CONSTANTCONDITION)
	while (1) {
		if ((rc = efx_vpd_hunk_next(data, size, &dtag,
		    &dkey, NULL, NULL, &dcont)) != 0)
			goto fail2;
		if (dcont == 0)
			break;

		/*
		 * Skip the RV keyword. It should be present in both the static
		 * and dynamic cfg sectors.
		 */
		if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
			continue;

		scont = 0;
		_NOTE(CONSTANTCONDITION)
		while (1) {
			if ((rc = efx_vpd_hunk_next(
			    enp->en_u.siena.enu_svpd,
			    enp->en_u.siena.enu_svpd_length, &stag, &skey,
			    NULL, NULL, &scont)) != 0)
				goto fail3;
			if (scont == 0)
				break;

			if (stag == dtag && skey == dkey) {
				rc = EEXIST;
				goto fail4;
			}
		}
	}

done:
	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn		efx_rc_t
siena_vpd_reinit(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	boolean_t wantpid;
	efx_rc_t rc;

	/*
	 * Only create a PID if the dynamic cfg doesn't have one
	 */
	if (enp->en_u.siena.enu_svpd_length == 0)
		wantpid = B_TRUE;
	else {
		unsigned int offset;
		uint8_t length;

		rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
		    enp->en_u.siena.enu_svpd_length,
		    EFX_VPD_ID, 0, &offset, &length);
		if (rc == 0)
			wantpid = B_FALSE;
		else if (rc == ENOENT)
			wantpid = B_TRUE;
		else
			goto fail1;
	}

	if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn		efx_rc_t
siena_vpd_get(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__inout			efx_vpd_value_t *evvp)
{
	unsigned int offset;
	uint8_t length;
	efx_rc_t rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	/* Attempt to satisfy the request from svpd first */
	if (enp->en_u.siena.enu_svpd_length > 0) {
		if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
		    enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
		    evvp->evv_keyword, &offset, &length)) == 0) {
			evvp->evv_length = length;
			memcpy(evvp->evv_value,
			    enp->en_u.siena.enu_svpd + offset, length);
			return (0);
		} else if (rc != ENOENT)
			goto fail1;
	}

	/* And then from the provided data buffer */
	if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
	    evvp->evv_keyword, &offset, &length)) != 0) {
		if (rc == ENOENT)
			return (rc);
		goto fail2;
	}

	evvp->evv_length = length;
	memcpy(evvp->evv_value, data + offset, length);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn		efx_rc_t
siena_vpd_set(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__in			efx_vpd_value_t *evvp)
{
	efx_rc_t rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	/* If the provided (tag,keyword) exists in svpd, then it is readonly */
	if (enp->en_u.siena.enu_svpd_length > 0) {
		unsigned int offset;
		uint8_t length;

		if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
		    enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
		    evvp->evv_keyword, &offset, &length)) == 0) {
			rc = EACCES;
			goto fail1;
		}
	}

	if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn		efx_rc_t
siena_vpd_next(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__out			efx_vpd_value_t *evvp,
	__inout			unsigned int *contp)
{
	_NOTE(ARGUNUSED(enp, data, size, evvp, contp))

	return (ENOTSUP);
}
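
/*
 * Editorial sketch: the two-character VPD keywords compared in
 * siena_vpd_verify() above (e.g. EFX_VPD_KEYWORD('R', 'V')) pack a pair
 * of ASCII characters into one integer so tag/keyword pairs compare
 * with plain `=='.  The real macro lives in efx.h; a packing of this
 * shape is assumed here for illustration:
 */
#define	VPD_KEYWORD_SKETCH(_x, _y)	((_x) | ((_y) << 8))
/* VPD_KEYWORD_SKETCH('R', 'V') == 0x5652, the read-only checksum keyword */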
	__checkReturn		efx_rc_t
siena_vpd_write(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	siena_mc_dynamic_config_hdr_t *dcfg = NULL;
	unsigned int vpd_offset;
	unsigned int dcfg_partn;
	unsigned int hdr_length;
	unsigned int pos;
	uint8_t cksum;
	size_t partn_size, dcfg_size;
	size_t vpd_length;
	efx_rc_t rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	/* Determine total length of all tags */
	if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
		goto fail1;

	/* Lock dynamic config sector for write, and read structure only */
	dcfg_partn = (emip->emi_port == 1)
		? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
		: MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;

	if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
		goto fail2;

	if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
		goto fail3;

	if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
	    B_FALSE, &dcfg, &dcfg_size)) != 0)
		goto fail4;

	hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);

	/* Allocated memory should have room for the new VPD */
	if (hdr_length + vpd_length > dcfg_size) {
		rc = ENOSPC;
		goto fail5;
	}

	/* Copy in new vpd and update header */
	vpd_offset = dcfg_size - vpd_length;
	EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0,
	    vpd_offset);
	memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length);
	EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0,
	    vpd_length);

	/* Update the checksum */
	cksum = 0;
	for (pos = 0; pos < hdr_length; pos++)
		cksum += ((uint8_t *)dcfg)[pos];
	dcfg->csum.eb_u8[0] -= cksum;

	/* Erase and write the new sector */
	if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0,
	    partn_size)) != 0)
		goto fail6;

	/* Write out the new structure to nvram */
	if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0,
	    (caddr_t)dcfg, vpd_offset + vpd_length)) != 0)
		goto fail7;

	EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);

	siena_nvram_partn_unlock(enp, dcfg_partn);

	return (0);

fail7:
	EFSYS_PROBE(fail7);
fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);

	EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);

fail4:
	EFSYS_PROBE(fail4);

	siena_nvram_partn_unlock(enp, dcfg_partn);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

				void
siena_vpd_fini(
	__in			efx_nic_t *enp)
{
	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	if (enp->en_u.siena.enu_svpd_length > 0) {
		EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length,
		    enp->en_u.siena.enu_svpd);

		enp->en_u.siena.enu_svpd = NULL;
		enp->en_u.siena.enu_svpd_length = 0;
	}
}

#endif /* EFSYS_OPT_SIENA */

#endif /* EFSYS_OPT_VPD */
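
/*
 * Editorial sketch: siena_vpd_write() above preserves the header's
 * byte-sum-zero invariant incrementally.  After editing the header it
 * sums all header bytes (stale checksum byte included) and subtracts
 * the result from the checksum byte; the total then returns to zero
 * without a separate clear-and-recompute pass.  Standalone:
 */
static void
cfg_cksum_update_sketch(uint8_t *hdr, size_t hdr_length, size_t csum_offset)
{
	uint8_t cksum = 0;
	size_t pos;

	for (pos = 0; pos < hdr_length; pos++)
		cksum += hdr[pos];
	hdr[csum_offset] -= cksum;	/* sum over header is now 0 mod 256 */
}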