Index: head/sys/dev/sfxge/common/ef10_ev.c =================================================================== --- head/sys/dev/sfxge/common/ef10_ev.c (revision 310681) +++ head/sys/dev/sfxge/common/ef10_ev.c (revision 310682) @@ -1,1218 +1,1218 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_STATS #include "mcdi_mon.h" #endif #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ do { \ (_eep)->ee_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_EV_QSTAT_INCR(_eep, _stat) #endif static __checkReturn boolean_t ef10_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn efx_rc_t efx_mcdi_set_evq_tmr( __in efx_nic_t *enp, __in uint32_t instance, __in uint32_t mode, __in uint32_t timer_ns) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN, MC_CMD_SET_EVQ_TMR_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_EVQ_TMR; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN; MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) { rc = EMSGSIZE; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_init_evq( __in efx_nic_t *enp, __in unsigned int instance, __in efsys_mem_t *esmp, __in size_t nevs, __in uint32_t irq, __in uint32_t us, __in boolean_t low_latency) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), MC_CMD_INIT_EVQ_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; int ev_cut_through; efx_rc_t rc; npages = EFX_EVQ_NBUFS(nevs); if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq); /* * On Huntington RX and TX event batching can only be requested together * (even if the datapath firmware doesn't actually support RX * batching). If event cut through is enabled no RX batching will occur. * * So always enable RX and TX event batching, and enable event cut * through if we want low latency operation. */ ev_cut_through = low_latency ? 
1 : 0; MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS, INIT_EVQ_IN_FLAG_INTERRUPTING, 1, INIT_EVQ_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_IN_FLAG_INT_ARMD, 0, INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through, INIT_EVQ_IN_FLAG_RX_MERGE, 1, INIT_EVQ_IN_FLAG_TX_MERGE, 1); /* If the value is zero then disable the timer */ if (us == 0) { MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail2; MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks); } MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE, MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { rc = EMSGSIZE; goto fail4; } /* NOTE: ignore the returned IRQ param as firmware does not set it. */ return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_init_evq_v2( __in efx_nic_t *enp, __in unsigned int instance, __in efsys_mem_t *esmp, __in size_t nevs, __in uint32_t irq, __in uint32_t us) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), MC_CMD_INIT_EVQ_V2_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; efx_rc_t rc; npages = EFX_EVQ_NBUFS(nevs); if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS, INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1, INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, INIT_EVQ_V2_IN_FLAG_TYPE, MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO); /* If the value is zero then disable the timer */ if (us == 0) { MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail2; MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); } MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { 
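		/*
		 * Each event queue buffer page is passed as a 64-bit DMA
		 * address, split into its high (EFX_DWORD_1) and low
		 * (EFX_DWORD_0) 32-bit halves.
		 */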
EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { rc = EMSGSIZE; goto fail4; } /* NOTE: ignore the returned IRQ param as firmware does not set it. */ EFSYS_PROBE1(mcdi_evq_flags, uint32_t, MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fini_evq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN, MC_CMD_FINI_EVQ_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_ev_init( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) return (0); } void ef10_ev_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } __checkReturn efx_rc_t ef10_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint32_t us, __in efx_evq_t *eep) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t irq; efx_rc_t rc; _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail2; } if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail3; } /* Set up the handler table */ eep->ee_rx = ef10_ev_rx; eep->ee_tx = ef10_ev_tx; eep->ee_driver = ef10_ev_driver; eep->ee_drv_gen = ef10_ev_drv_gen; eep->ee_mcdi = ef10_ev_mcdi; /* Set up the event queue */ irq = index; /* INIT_EVQ expects function-relative vector number */ /* * Interrupts may be raised for events immediately after the queue is * created. See bug58606. */ if (encp->enc_init_evq_v2_supported) { /* * On Medford the low latency license is required to enable RX * and event cut through and to disable RX batching. We let the * firmware decide the settings to use. If the adapter has a low * latency license, it will choose the best settings for low * latency, otherwise it choose the best settings for * throughput. */ rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us); if (rc != 0) goto fail4; } else { /* * On Huntington we need to specify the settings to use. We * favour latency if the adapter is running low-latency firmware * and throughput otherwise, and assume not support RX batching * implies the adapter is running low-latency firmware. (This * is how it's been done since Huntington GA. It doesn't make * much sense with hindsight as the 'low-latency' firmware * variant is also best for throughput, and does now support RX * batching). */ boolean_t low_latency = encp->enc_rx_batching_enabled ? 
0 : 1; rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, low_latency); if (rc != 0) goto fail5; } return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index); } __checkReturn efx_rc_t ef10_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; rptr = count & eep->ee_mask; if (enp->en_nic_cfg.enc_bug35388_workaround) { EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS > (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS < (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, ERF_DD_EVQ_IND_RPTR, (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, ERF_DD_EVQ_IND_RPTR, rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); } else { EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); } return (0); } static __checkReturn efx_rc_t efx_mcdi_driver_event( __in efx_nic_t *enp, __in uint32_t evq, __in efx_qword_t data) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN, MC_CMD_DRIVER_EVENT_OUT_LEN)]; efx_rc_t rc; req.emr_cmd = MC_CMD_DRIVER_EVENT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN; MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO, EFX_QWORD_FIELD(data, EFX_DWORD_0)); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI, EFX_QWORD_FIELD(data, EFX_DWORD_1)); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t event; EFX_POPULATE_QWORD_3(event, ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV, ESF_DZ_DRV_SUB_CODE, 0, ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data); (void) efx_mcdi_driver_event(enp, eep->ee_index, event); } __checkReturn efx_rc_t ef10_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_dword_t dword; uint32_t mode; efx_rc_t rc; /* Check that hardware and MCDI use the same timer MODE values */ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF); if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { mode = FFE_CZ_TIMER_MODE_DIS; } else { mode = FFE_CZ_TIMER_MODE_INT_HLDOFF; } if (encp->enc_bug61265_workaround) { uint32_t ns = us * 1000; rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, 
ns); if (rc != 0) goto fail2; } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail3; if (encp->enc_bug35388_workaround) { EFX_POPULATE_DWORD_3(dword, ERF_DD_EVQ_IND_TIMER_FLAGS, EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, ERF_DD_EVQ_IND_TIMER_VAL, ticks); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, 0); } else { EFX_POPULATE_DWORD_2(dword, ERF_DZ_TC_TIMER_MODE, mode, ERF_DZ_TC_TIMER_VAL, ticks); EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG, eep->ee_index, &dword, 0); } } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_QSTATS void ef10_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < EV_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, eep->ee_stat[id]); eep->ee_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ static __checkReturn boolean_t ef10_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t size; uint32_t label; uint32_t mac_class; uint32_t eth_tag_class; uint32_t l3_class; uint32_t l4_class; uint32_t next_read_lbits; uint16_t flags; boolean_t cont; boolean_t should_abort; efx_evq_rxq_state_t *eersp; unsigned int desc_count; unsigned int last_used_id; EFX_EV_QSTAT_INCR(eep, EV_RX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); /* Basic packet information */ size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS); cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } flags = 0; if (cont != 0) { /* * This may be part of a scattered frame, or it may be a * truncated frame if scatter is disabled on this RXQ. * Overlength frames can be received if e.g. a VF is configured * for 1500 MTU but connected to a port set to 9000 MTU * (see bug56567). * FIXME: There is not yet any driver that supports scatter on * Huntington. Scatter support is required for OSX. */ flags |= EFX_PKT_CONT; } if (mac_class == ESE_DZ_MAC_CLASS_UCAST) flags |= EFX_PKT_UNICAST; /* Increment the count of descriptors read */ eersp = &eep->ee_rxq_state[label]; desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) & EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); eersp->eers_rx_read_ptr += desc_count; /* * FIXME: add error checking to make sure this a batched event. * This could also be an aborted scatter, see Bug36629. 
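	 * For now, any event covering more than one descriptor is treated
	 * as a batched completion and flagged with EFX_PKT_PREFIX_LEN.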
*/ if (desc_count > 1) { EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); flags |= EFX_PKT_PREFIX_LEN; } /* Calculate the index of the last descriptor consumed */ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { /* RX frame truncated (error flag is misnamed) */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { /* Bad Ethernet frame CRC */ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { /* * Hardware parse failed, due to malformed headers * or headers that are too long for the parser. * Headers and checksums must be validated by the host. */ - // TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); + /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */ goto deliver; } if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) || (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) { flags |= EFX_PKT_VLAN_TAGGED; } switch (l3_class) { case ESE_DZ_L3_CLASS_IP4: case ESE_DZ_L3_CLASS_IP4_FRAG: flags |= EFX_PKT_IPV4; if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) { EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); } else { flags |= EFX_CKSUM_IPV4; } if (l4_class == ESE_DZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_TCP; } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_UDP; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); } break; case ESE_DZ_L3_CLASS_IP6: case ESE_DZ_L3_CLASS_IP6_FRAG: flags |= EFX_PKT_IPV6; if (l4_class == ESE_DZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_TCP; } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_UDP; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); } break; default: EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); break; } if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) { if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); } else { flags |= EFX_CKSUM_TCPUDP; } } deliver: /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags); return (should_abort); } static __checkReturn boolean_t ef10_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } static __checkReturn boolean_t ef10_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { unsigned int code; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; code = 
EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE); switch (code) { case ESE_DZ_DRV_TIMER_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID); EFSYS_ASSERT(eecp->eec_timer != NULL); should_abort = eecp->eec_timer(arg, id); break; } case ESE_DZ_DRV_WAKE_UP_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID); EFSYS_ASSERT(eecp->eec_wake_up != NULL); should_abort = eecp->eec_wake_up(arg, id); break; } case ESE_DZ_DRV_START_UP_EV: EFSYS_ASSERT(eecp->eec_initialized != NULL); should_abort = eecp->eec_initialized(arg); break; default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } static __checkReturn boolean_t ef10_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t data; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); should_abort = B_FALSE; data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0); if (data >= ((uint32_t)1 << 16)) { EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); return (B_TRUE); } EFSYS_ASSERT(eecp->eec_software != NULL); should_abort = eecp->eec_software(arg, (uint16_t)data); return (should_abort); } static __checkReturn boolean_t ef10_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; unsigned code; boolean_t should_abort = B_FALSE; EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); switch (code) { case MCDI_EVENT_CODE_BADSSERT: efx_mcdi_ev_death(enp, EINTR); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(enp, MCDI_EV_FIELD(eqp, CMDDONE_SEQ), MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); break; #if EFSYS_OPT_MCDI_PROXY_AUTH case MCDI_EVENT_CODE_PROXY_RESPONSE: /* * This event notifies a function that an authorization request * has been processed. If the request was authorized then the * function can now re-send the original MCDI request. * See SF-113652-SW "SR-IOV Proxied Network Access Control". */ efx_mcdi_ev_proxy_response(enp, MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE), MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC)); break; #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ case MCDI_EVENT_CODE_LINKCHANGE: { efx_link_mode_t link_mode; ef10_phy_link_ev(enp, eqp, &link_mode); should_abort = eecp->eec_link_change(arg, link_mode); break; } case MCDI_EVENT_CODE_SENSOREVT: { #if EFSYS_OPT_MON_STATS efx_mon_stat_t id; efx_mon_stat_value_t value; efx_rc_t rc; /* Decode monitor stat for MCDI sensor (if supported) */ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) { /* Report monitor stat change */ should_abort = eecp->eec_monitor(arg, id, value); } else if (rc == ENOTSUP) { should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_SENSOREVT, MCDI_EV_FIELD(eqp, DATA)); } else { EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ } #endif break; } case MCDI_EVENT_CODE_SCHEDERR: /* Informational only */ break; case MCDI_EVENT_CODE_REBOOT: /* Falcon/Siena only (should not been seen with Huntington). */ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MC_REBOOT: /* MC_REBOOT event is used for Huntington (EF10) and later. 
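	 * Siena reports the equivalent condition via MCDI_EVENT_CODE_REBOOT
	 * (handled above); both cases signal MCDI death with EIO.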
*/ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif break; case MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } case MCDI_EVENT_CODE_TX_ERR: { /* * After a TXQ error is detected, firmware sends a TX_ERR event. * This may be followed by TX completions (which we discard), * and then finally by a TX_FLUSH event. Firmware destroys the * TXQ automatically after sending the TX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_TXQ_ERR; EFSYS_PROBE2(tx_descq_err, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR, MCDI_EV_FIELD(eqp, TX_ERR_DATA)); break; } case MCDI_EVENT_CODE_TX_FLUSH: { uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ); /* * EF10 firmware sends two TX_FLUSH events: one to the txq's * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with TX_FLUSH_TO_DRIVER. */ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case MCDI_EVENT_CODE_RX_ERR: { /* * After an RXQ error is detected, firmware sends an RX_ERR * event. This may be followed by RX events (which we discard), * and then finally by an RX_FLUSH event. Firmware destroys the * RXQ automatically after sending the RX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_RXQ_ERR; EFSYS_PROBE2(rx_descq_err, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR, MCDI_EV_FIELD(eqp, RX_ERR_DATA)); break; } case MCDI_EVENT_CODE_RX_FLUSH: { uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ); /* * EF10 firmware sends two RX_FLUSH events: one to the rxq's * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with RX_FLUSH_TO_DRIVER. 
*/ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); break; } default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } void ef10_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label) { efx_evq_rxq_state_t *eersp; EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = erp->er_mask; } void ef10_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label) { efx_evq_rxq_state_t *eersp; EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0); eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = 0; } #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: head/sys/dev/sfxge/common/ef10_nvram.c =================================================================== --- head/sys/dev/sfxge/common/ef10_nvram.c (revision 310681) +++ head/sys/dev/sfxge/common/ef10_nvram.c (revision 310682) @@ -1,2368 +1,2368 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM #include "ef10_tlv_layout.h" /* Cursor for TLV partition format */ typedef struct tlv_cursor_s { uint32_t *block; /* Base of data block */ uint32_t *current; /* Cursor position */ uint32_t *end; /* End tag position */ uint32_t *limit; /* Last dword of data block */ } tlv_cursor_t; typedef struct nvram_partition_s { uint16_t type; uint8_t chip_select; uint8_t flags; /* * The full length of the NVRAM partition. * This is different from tlv_partition_header.total_length, * which can be smaller. */ uint32_t length; uint32_t erase_size; uint32_t *data; tlv_cursor_t tlv_cursor; } nvram_partition_t; static __checkReturn efx_rc_t tlv_validate_state( __inout tlv_cursor_t *cursor); static void tlv_init_block( __out uint32_t *block) { *block = __CPU_TO_LE_32(TLV_TAG_END); } static uint32_t tlv_tag( __in tlv_cursor_t *cursor) { uint32_t dword, tag; dword = cursor->current[0]; tag = __LE_TO_CPU_32(dword); return (tag); } static size_t tlv_length( __in tlv_cursor_t *cursor) { uint32_t dword, length; if (tlv_tag(cursor) == TLV_TAG_END) return (0); dword = cursor->current[1]; length = __LE_TO_CPU_32(dword); return ((size_t)length); } static uint8_t * tlv_value( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)(&cursor->current[2])); } static uint8_t * tlv_item( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)cursor->current); } /* * TLV item DWORD length is tag + length + value (rounded up to DWORD) * equivalent to tlv_n_words_for_len in mc-comms tlv.c */ #define TLV_DWORD_COUNT(length) \ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t))) static uint32_t * tlv_next_item_ptr( __in tlv_cursor_t *cursor) { uint32_t length; length = tlv_length(cursor); return (cursor->current + TLV_DWORD_COUNT(length)); } static __checkReturn efx_rc_t tlv_advance( __inout tlv_cursor_t *cursor) { efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (cursor->current == cursor->end) { /* No more tags after END tag */ cursor->current = NULL; rc = ENOENT; goto fail2; } /* Advance to next item and validate */ cursor->current = tlv_next_item_ptr(cursor); if ((rc = tlv_validate_state(cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_rewind( __in tlv_cursor_t *cursor) { efx_rc_t rc; cursor->current = cursor->block; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_find( __inout tlv_cursor_t *cursor, __in uint32_t tag) { efx_rc_t rc; rc = tlv_rewind(cursor); while (rc == 0) { if (tlv_tag(cursor) == tag) break; rc = tlv_advance(cursor); } return (rc); } static __checkReturn efx_rc_t tlv_validate_state( __inout tlv_cursor_t *cursor) { efx_rc_t rc; /* Check cursor position */ if (cursor->current < cursor->block) { rc = EINVAL; goto fail1; } if (cursor->current > cursor->limit) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != TLV_TAG_END) { /* Check current item has space for tag and length */ if (cursor->current > (cursor->limit - 2)) { cursor->current = NULL; rc = EFAULT; goto fail3; } /* Check we have value data for current item and another tag */ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) { cursor->current = NULL; rc = 
EFAULT; goto fail4; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_init_cursor( __out tlv_cursor_t *cursor, __in uint32_t *block, __in uint32_t *limit, __in uint32_t *current) { cursor->block = block; cursor->limit = limit; cursor->current = current; cursor->end = NULL; return (tlv_validate_state(cursor)); } static __checkReturn efx_rc_t tlv_init_cursor_from_size( __out tlv_cursor_t *cursor, __in_bcount(size) uint8_t *block, __in size_t size) { uint32_t *limit; limit = (uint32_t *)(block + size - sizeof (uint32_t)); return (tlv_init_cursor(cursor, (uint32_t *)block, limit, (uint32_t *)block)); } static __checkReturn efx_rc_t tlv_init_cursor_at_offset( __out tlv_cursor_t *cursor, __in_bcount(size) uint8_t *block, __in size_t size, __in size_t offset) { uint32_t *limit; uint32_t *current; limit = (uint32_t *)(block + size - sizeof (uint32_t)); current = (uint32_t *)(block + offset); return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current)); } static __checkReturn efx_rc_t tlv_require_end( __inout tlv_cursor_t *cursor) { uint32_t *pos; efx_rc_t rc; if (cursor->end == NULL) { pos = cursor->current; if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0) goto fail1; cursor->end = cursor->current; cursor->current = pos; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static size_t tlv_block_length_used( __inout tlv_cursor_t *cursor) { efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; /* Return space used (including the END tag) */ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } static uint32_t * tlv_last_segment_end( __in tlv_cursor_t *cursor) { tlv_cursor_t segment_cursor; uint32_t *last_segment_end = cursor->block; uint32_t *segment_start = cursor->block; /* * Go through each segment and check that it has an end tag. If there * is no end tag then the previous segment was the last valid one, * so return the pointer to its end tag. 
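	 * If even the first segment has no end tag, the start of the block
	 * itself is returned.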
*/ for (;;) { if (tlv_init_cursor(&segment_cursor, segment_start, cursor->limit, segment_start) != 0) break; if (tlv_require_end(&segment_cursor) != 0) break; last_segment_end = segment_cursor.end; segment_start = segment_cursor.end + 1; } return (last_segment_end); } static uint32_t * tlv_write( __in tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t len = size; uint32_t *ptr; ptr = cursor->current; *ptr++ = __CPU_TO_LE_32(tag); *ptr++ = __CPU_TO_LE_32(len); if (len > 0) { ptr[(len - 1) / sizeof (uint32_t)] = 0; memcpy(ptr, data, len); ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr); } return (ptr); } static __checkReturn efx_rc_t tlv_insert( __inout tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; if (tag == TLV_TAG_END) { rc = EINVAL; goto fail3; } last_segment_end = tlv_last_segment_end(cursor); delta = TLV_DWORD_COUNT(size); if (last_segment_end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail4; } /* Move data up: new space at cursor->current */ memmove(cursor->current + delta, cursor->current, (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; /* Write new TLV item */ tlv_write(cursor, tag, data, size); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t tlv_delete( __inout tlv_cursor_t *cursor) { unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } delta = TLV_DWORD_COUNT(tlv_length(cursor)); if ((rc = tlv_require_end(cursor)) != 0) goto fail3; last_segment_end = tlv_last_segment_end(cursor); /* Shuffle things down, destroying the item at cursor->current */ memmove(cursor->current, cursor->current + delta, (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t tlv_modify( __inout tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t *pos; unsigned int old_ndwords; unsigned int new_ndwords; unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != tag) { rc = EINVAL; goto fail3; } old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor)); new_ndwords = TLV_DWORD_COUNT(size); if ((rc = tlv_require_end(cursor)) != 0) goto fail4; last_segment_end = tlv_last_segment_end(cursor); if (new_ndwords > old_ndwords) { /* Expand space used for TLV item */ delta = new_ndwords - old_ndwords; pos = cursor->current + old_ndwords; if (last_segment_end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail5; } /* Move up: new space at (cursor->current + old_ndwords) */ memmove(pos + delta, pos, (last_segment_end + 1 - pos) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; } else 
if (new_ndwords < old_ndwords) { /* Shrink space used for TLV item */ delta = old_ndwords - new_ndwords; pos = cursor->current + new_ndwords; /* Move down: remove words at (cursor->current + new_ndwords) */ memmove(pos, pos + delta, (last_segment_end + 1 - pos) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; } /* Write new data */ tlv_write(cursor, tag, data, size); return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint32_t checksum_tlv_partition( __in nvram_partition_t *partition) { tlv_cursor_t *cursor; uint32_t *ptr; uint32_t *end; uint32_t csum; size_t len; cursor = &partition->tlv_cursor; len = tlv_block_length_used(cursor); EFSYS_ASSERT3U((len & 3), ==, 0); csum = 0; ptr = partition->data; end = &ptr[len >> 2]; while (ptr < end) csum += __LE_TO_CPU_32(*ptr++); return (csum); } static __checkReturn efx_rc_t tlv_update_partition_len_and_cks( __in tlv_cursor_t *cursor) { efx_rc_t rc; nvram_partition_t partition; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t new_len; /* * We just modified the partition, so the total length may not be * valid. Don't use tlv_find(), which performs some sanity checks * that may fail here. */ partition.data = cursor->block; memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor)); header = (struct tlv_partition_header *)partition.data; /* Sanity check. */ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) { rc = EFAULT; goto fail1; } new_len = tlv_block_length_used(&partition.tlv_cursor); if (new_len == 0) { rc = EFAULT; goto fail2; } header->total_length = __CPU_TO_LE_32(new_len); /* Ensure the modified partition always has a new generation count. 
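	 * The generation count is written to both the header and the trailer
	 * so that readers can detect a partition modified mid-read (see the
	 * consistency check in ef10_nvram_read_tlv_segment).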
*/ header->generation = __CPU_TO_LE_32( __LE_TO_CPU_32(header->generation) + 1); trailer = (struct tlv_partition_trailer *)((uint8_t *)header + new_len - sizeof (*trailer) - sizeof (uint32_t)); trailer->generation = header->generation; trailer->checksum = __CPU_TO_LE_32( __LE_TO_CPU_32(trailer->checksum) - checksum_tlv_partition(&partition)); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Validate buffer contents (before writing to flash) */ __checkReturn efx_rc_t ef10_nvram_buffer_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; efx_rc_t rc; EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); if ((partn_data == NULL) || (partn_size == 0)) { rc = EINVAL; goto fail1; } /* The partition header must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data, partn_size)) != 0) { rc = EFAULT; goto fail2; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail3; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV partition length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > partn_size) { rc = EFBIG; goto fail4; } /* Check partition ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail5; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail6; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail7; } /* Check generation counts are consistent */ if (trailer->generation != header->generation) { rc = EINVAL; goto fail8; } /* Verify partition checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(partn_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail9; } return (0); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_create( __in efx_nic_t *enp, __in uint16_t partn_type, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { uint32_t *buf = (uint32_t *)partn_data; efx_rc_t rc; tlv_cursor_t cursor; struct tlv_partition_header header; struct tlv_partition_trailer trailer; unsigned min_buf_size = sizeof (struct tlv_partition_header) + sizeof (struct tlv_partition_trailer); if (partn_size < min_buf_size) { rc = EINVAL; goto fail1; } memset(buf, 0xff, partn_size); tlv_init_block(buf); if ((rc = tlv_init_cursor(&cursor, buf, (uint32_t *)((uint8_t *)buf + partn_size), buf)) != 0) { goto fail2; } header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER); header.length = __CPU_TO_LE_32(sizeof (header) - 8); header.type_id = __CPU_TO_LE_16(partn_type); header.preset = 0; header.generation = __CPU_TO_LE_32(1); header.total_length = 0; /* This will be fixed below. 
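	 * (It is recomputed by tlv_update_partition_len_and_cks() once the
	 * header and trailer items have been inserted.)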
*/ if ((rc = tlv_insert( &cursor, TLV_TAG_PARTITION_HEADER, (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0) goto fail3; if ((rc = tlv_advance(&cursor)) != 0) goto fail4; trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER); trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8); trailer.generation = header.generation; trailer.checksum = 0; /* This will be fixed below. */ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER, (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0) goto fail5; if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) goto fail6; /* Check that the partition is valid. */ if ((rc = ef10_nvram_buffer_validate(enp, partn_type, partn_data, partn_size)) != 0) goto fail7; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint32_t byte_offset( __in uint32_t *position, __in uint32_t *base) { return (uint32_t)((uint8_t *)position - (uint8_t *)base); } __checkReturn efx_rc_t ef10_nvram_buffer_find_item_start( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp) { - // Read past partition header to find start address of the first key + /* Read past partition header to find start address of the first key */ tlv_cursor_t cursor; efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail3; } *startp = byte_offset(cursor.current, cursor.block); if ((rc = tlv_require_end(&cursor)) != 0) goto fail4; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_find_end( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp) { - // Read to end of partition + /* Read to end of partition */ tlv_cursor_t cursor; efx_rc_t rc; uint32_t *segment_used; _NOTE(ARGUNUSED(offset)) if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } segment_used = cursor.block; /* * Go through each segment and check that it has an end tag. If there * is no end tag then the previous segment was the last valid one, * so return the used space including that end tag. */ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { if (tlv_require_end(&cursor) != 0) { if (segment_used == cursor.block) { /* * First segment is corrupt, so there is * no valid data in partition. 
*/ rc = EINVAL; goto fail2; } break; } segment_used = cursor.end + 1; cursor.current = segment_used; } /* Return space used (including the END tag) */ *endp = (segment_used - cursor.block) * sizeof (uint32_t); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn __success(return != B_FALSE) boolean_t ef10_nvram_buffer_find_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp) { - // Find TLV at offset and return key start and length + /* Find TLV at offset and return key start and length */ tlv_cursor_t cursor; uint8_t *key; uint32_t tag; if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset) != 0) { return (B_FALSE); } while ((key = tlv_item(&cursor)) != NULL) { tag = tlv_tag(&cursor); if (tag == TLV_TAG_PARTITION_HEADER || tag == TLV_TAG_PARTITION_TRAILER) { if (tlv_advance(&cursor) != 0) { break; } continue; } *startp = byte_offset(cursor.current, cursor.block); *lengthp = byte_offset(tlv_next_item_ptr(&cursor), cursor.current); return (B_TRUE); } return (B_FALSE); } __checkReturn efx_rc_t ef10_nvram_buffer_get_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(item_max_size, *lengthp) caddr_t itemp, __in size_t item_max_size, __out uint32_t *lengthp) { efx_rc_t rc; tlv_cursor_t cursor; uint32_t item_length; if (item_max_size < length) { rc = ENOSPC; goto fail1; } if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail2; } item_length = tlv_length(&cursor); if (length < item_length) { rc = ENOSPC; goto fail3; } memcpy(itemp, tlv_value(&cursor), item_length); *lengthp = item_length; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_insert_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp) { efx_rc_t rc; tlv_cursor_t cursor; if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail1; } rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length); if (rc != 0) { goto fail2; } *lengthp = byte_offset(tlv_next_item_ptr(&cursor), cursor.current); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_delete_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end) { efx_rc_t rc; tlv_cursor_t cursor; _NOTE(ARGUNUSED(length, end)) if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail1; } if ((rc = tlv_delete(&cursor)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_finish( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size) { efx_rc_t rc; tlv_cursor_t cursor; if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } if ((rc = tlv_require_end(&cursor)) != 0) goto fail2; if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: 
EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read and validate a segment from a partition. A segment is a complete * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may * be multiple segments in a partition, so seg_offset allows segments * beyond the first to be read. */ static __checkReturn efx_rc_t ef10_nvram_read_tlv_segment( __in efx_nic_t *enp, __in uint32_t partn, __in size_t seg_offset, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; efx_rc_t rc; EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Read initial chunk of the segment, starting at offset */ if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data, EF10_NVRAM_CHUNK, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) { goto fail2; } /* A PARTITION_HEADER tag must be the first item at the given offset */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail3; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail4; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV segment length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > max_seg_size) { rc = EFBIG; goto fail5; } /* Read the remaining segment content */ if (total_length > EF10_NVRAM_CHUNK) { if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset + EF10_NVRAM_CHUNK, seg_data + EF10_NVRAM_CHUNK, total_length - EF10_NVRAM_CHUNK, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) goto fail6; } /* Check segment ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail7; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail8; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail9; } /* Check data read from segment is consistent */ if (trailer->generation != header->generation) { /* * The partition data may have been modified between successive * MCDI NVRAM_READ requests by the MC or another PCI function. * * The caller must retry to obtain consistent partition data. */ rc = EAGAIN; goto fail10; } /* Verify segment checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail11; } return (0); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read a single TLV item from a host memory * buffer containing a TLV formatted segment. 
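 * On success the item value is returned in a buffer allocated with
 * EFSYS_KMEM_ALLOC (NULL for a zero-length item), with its length in *sizep;
 * the caller is responsible for freeing that buffer.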
*/ __checkReturn efx_rc_t ef10_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep) { tlv_cursor_t cursor; caddr_t data; size_t length; caddr_t value; efx_rc_t rc; if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Find requested TLV tag in segment data */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail2; } if ((rc = tlv_find(&cursor, tag)) != 0) { rc = ENOENT; goto fail3; } value = (caddr_t)tlv_value(&cursor); length = tlv_length(&cursor); if (length == 0) data = NULL; else { /* Copy out data from TLV item */ EFSYS_KMEM_ALLOC(enp->en_esip, length, data); if (data == NULL) { rc = ENOMEM; goto fail4; } memcpy(data, value, length); } *datap = data; *sizep = length; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Read a single TLV item from the first segment in a TLV formatted partition */ __checkReturn efx_rc_t ef10_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap, __out size_t *seg_sizep) { caddr_t seg_data = NULL; size_t partn_size = 0; size_t length; caddr_t data; int retry; efx_rc_t rc; /* Allocate sufficient memory for the entire partition */ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; if (partn_size == 0) { rc = ENOENT; goto fail2; } EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data); if (seg_data == NULL) { rc = ENOMEM; goto fail3; } /* * Read the first segment in a TLV partition. Retry until consistent * segment contents are returned. Inconsistent data may be read if: * a) the segment contents are invalid * b) the MC has rebooted while we were reading the partition * c) the partition has been modified while we were reading it * Limit retry attempts to ensure forward progress. */ retry = 10; do { rc = ef10_nvram_read_tlv_segment(enp, partn, 0, seg_data, partn_size); } while ((rc == EAGAIN) && (--retry > 0)); if (rc != 0) { /* Failed to obtain consistent segment data */ goto fail4; } if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size, tag, &data, &length)) != 0) goto fail5; EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); *seg_datap = data; *seg_sizep = length; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Compute the size of a segment. 
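 * That is, the total length from the PARTITION_HEADER tag to the END tag,
 * cross-checked against the header's total_length field and the segment
 * checksum, and rejecting segments containing duplicate header tags.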
*/ static __checkReturn efx_rc_t ef10_nvram_buf_segment_size( __in caddr_t seg_data, __in size_t max_seg_size, __out size_t *seg_sizep) { efx_rc_t rc; tlv_cursor_t cursor; struct tlv_partition_header *header; uint32_t cksum; int pos; uint32_t *end_tag_position; uint32_t segment_length; /* A PARTITION_HEADER tag must be the first item at the given offset */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV segment length (includes the END tag) */ *seg_sizep = __LE_TO_CPU_32(header->total_length); if (*seg_sizep > max_seg_size) { rc = EFBIG; goto fail3; } /* Check segment ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail4; } if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail5; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail6; } end_tag_position = cursor.current; /* Verify segment checksum */ cksum = 0; for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail7; } /* * Calculate total length from HEADER to END tags and compare to * max_seg_size and the total_length field in the HEADER tag. */ segment_length = tlv_block_length_used(&cursor); if (segment_length > max_seg_size) { rc = EINVAL; goto fail8; } if (segment_length != *seg_sizep) { rc = EINVAL; goto fail9; } /* Skip over the first HEADER tag. */ rc = tlv_rewind(&cursor); rc = tlv_advance(&cursor); while (rc == 0) { if (tlv_tag(&cursor) == TLV_TAG_END) { /* Check that the END tag is the one found earlier. */ if (cursor.current != end_tag_position) goto fail10; break; } /* Check for duplicate HEADER tags before the END tag. */ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail11; } rc = tlv_advance(&cursor); } if (rc != 0) goto fail12; return (0); fail12: EFSYS_PROBE(fail12); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in a host memory buffer containing a TLV * formatted segment. Historically partitions consisted of only one segment. 
*/ __checkReturn efx_rc_t ef10_nvram_buf_write_tlv( __inout_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; uint32_t generation; uint32_t cksum; int pos; efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Update the TLV chain to contain the new data */ if ((rc = tlv_find(&cursor, tag)) == 0) { /* Modify existing TLV item */ if ((rc = tlv_modify(&cursor, tag, (uint8_t *)tag_data, tag_size)) != 0) goto fail3; } else { /* Insert a new TLV item before the PARTITION_TRAILER */ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER); if (rc != 0) { rc = EINVAL; goto fail4; } if ((rc = tlv_insert(&cursor, tag, (uint8_t *)tag_data, tag_size)) != 0) { rc = EINVAL; goto fail5; } } /* Find the trailer tag */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail6; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); /* Update PARTITION_HEADER and PARTITION_TRAILER fields */ *total_lengthp = tlv_block_length_used(&cursor); if (*total_lengthp > max_seg_size) { rc = ENOSPC; goto fail7; } generation = __LE_TO_CPU_32(header->generation) + 1; header->total_length = __CPU_TO_LE_32(*total_lengthp); header->generation = __CPU_TO_LE_32(generation); trailer->generation = __CPU_TO_LE_32(generation); /* Recompute PARTITION_TRAILER checksum */ trailer->checksum = 0; cksum = 0; for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } trailer->checksum = ~cksum + 1; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in the first segment of a TLV formatted * dynamic config partition. The first segment is the current active * configuration. */ __checkReturn efx_rc_t ef10_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size) { return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data, size, B_FALSE); } /* * Read a segment from nvram at the given offset into a buffer (segment_data) * and optionally write a new tag to it. */ static __checkReturn efx_rc_t ef10_nvram_segment_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __inout caddr_t *seg_datap, __inout size_t *partn_offsetp, __inout size_t *src_remain_lenp, __inout size_t *dest_remain_lenp, __in boolean_t write) { efx_rc_t rc; efx_rc_t status; size_t original_segment_size; size_t modified_segment_size; /* * Read the segment from NVRAM into the segment_data buffer and validate * it, returning if it does not validate. This is not a failure unless * this is the first segment in a partition. In this case the caller * must propagate the error. 
*/ status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp, *seg_datap, *src_remain_lenp); if (status != 0) { rc = EINVAL; goto fail1; } status = ef10_nvram_buf_segment_size(*seg_datap, *src_remain_lenp, &original_segment_size); if (status != 0) { rc = EINVAL; goto fail2; } if (write) { /* Update the contents of the segment in the buffer */ if ((rc = ef10_nvram_buf_write_tlv(*seg_datap, *dest_remain_lenp, tag, data, size, &modified_segment_size)) != 0) { goto fail3; } *dest_remain_lenp -= modified_segment_size; *seg_datap += modified_segment_size; } else { /* * We won't modify this segment, but still need to update the * remaining lengths and pointers. */ *dest_remain_lenp -= original_segment_size; *seg_datap += original_segment_size; } *partn_offsetp += original_segment_size; *src_remain_lenp -= original_segment_size; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in either the first segment or in all * segments in a TLV formatted dynamic config partition. Dynamic config * partitions on boards that support RFID are divided into a number of segments, * each formatted like a partition, with header, trailer and end tags. The first * segment is the current active configuration. * * The segments are initialised by manftest and each contain a different * configuration e.g. firmware variant. The firmware can be instructed * via RFID to copy a segment to replace the first segment, hence changing the * active configuration. This allows ops to change the configuration of a board * prior to shipment using RFID. * * Changes to the dynamic config may need to be written to all segments (e.g. * firmware versions) or just the first segment (changes to the active * configuration). See SF-111324-SW "The use of RFID in Solarflare Products". * If only the first segment is written the code still needs to be aware of the * possible presence of subsequent segments as writing to a segment may cause * its size to increase, which would overwrite the subsequent segments and * invalidate them. */ __checkReturn efx_rc_t ef10_nvram_partn_write_segment_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t all_segments) { size_t partn_size = 0; caddr_t partn_data; size_t total_length = 0; efx_rc_t rc; size_t current_offset = 0; size_t remaining_original_length; size_t remaining_modified_length; caddr_t segment_data; EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG); /* Allocate sufficient memory for the entire partition */ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data); if (partn_data == NULL) { rc = ENOMEM; goto fail2; } remaining_original_length = partn_size; remaining_modified_length = partn_size; segment_data = partn_data; /* Lock the partition */ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0) goto fail3; /* Iterate over each (potential) segment to update it. */ do { boolean_t write = all_segments || current_offset == 0; rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size, &segment_data, ¤t_offset, &remaining_original_length, &remaining_modified_length, write); if (rc != 0) { if (current_offset == 0) { /* * If no data has been read then the first * segment is invalid, which is an error. 
*/ goto fail4; } break; } } while (current_offset < partn_size); total_length = segment_data - partn_data; /* * We've run out of space. This should actually be dealt with by * ef10_nvram_buf_write_tlv returning ENOSPC. */ if (total_length > partn_size) { rc = ENOSPC; goto fail5; } /* Erase the whole partition in NVRAM */ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0) goto fail6; /* Write new partition contents from the buffer to NVRAM */ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data, total_length)) != 0) goto fail7; /* Unlock the partition */ ef10_nvram_partn_unlock(enp, partn); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); ef10_nvram_partn_unlock(enp, partn); fail3: EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Get the size of a NVRAM partition. This is the total size allocated in nvram, * not the data used by the segments in the partition. */ __checkReturn efx_rc_t ef10_nvram_partn_size( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *sizep) { efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, sizep, NULL, NULL, NULL)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_lock( __in efx_nic_t *enp, __in uint32_t partn) { efx_rc_t rc; if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_read_mode( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size, __in uint32_t mode) { size_t chunk; efx_rc_t rc; while (size > 0) { chunk = MIN(size, EF10_NVRAM_CHUNK); if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk, mode)) != 0) { goto fail1; } size -= chunk; data += chunk; offset += chunk; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_read( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { /* * Read requests which come in through the EFX API expect to * read the current, active partition. 
*/ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT); } __checkReturn efx_rc_t ef10_nvram_partn_erase( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __in size_t size) { efx_rc_t rc; uint32_t erase_size; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, &erase_size, NULL)) != 0) goto fail1; if (erase_size == 0) { if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) goto fail2; } else { if (size % erase_size != 0) { rc = EINVAL; goto fail3; } while (size > 0) { if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, erase_size)) != 0) goto fail4; offset += erase_size; size -= erase_size; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_write( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { size_t chunk; uint32_t write_size; efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, NULL, &write_size)) != 0) goto fail1; if (write_size != 0) { /* * Check that the size is a multiple of the write chunk size if * the write chunk size is available. */ if (size % write_size != 0) { rc = EINVAL; goto fail2; } } else { write_size = EF10_NVRAM_CHUNK; } while (size > 0) { chunk = MIN(size, write_size); if ((rc = efx_mcdi_nvram_write(enp, partn, offset, data, chunk)) != 0) { goto fail3; } size -= chunk; data += chunk; offset += chunk; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_nvram_partn_unlock( __in efx_nic_t *enp, __in uint32_t partn) { boolean_t reboot; efx_rc_t rc; reboot = B_FALSE; if ((rc = efx_mcdi_nvram_update_finish(enp, partn, reboot)) != 0) goto fail1; return; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); } __checkReturn efx_rc_t ef10_nvram_partn_set_version( __in efx_nic_t *enp, __in uint32_t partn, __in_ecount(4) uint16_t version[4]) { struct tlv_partition_version partn_version; size_t size; efx_rc_t rc; /* Add or modify partition version TLV item */ partn_version.version_w = __CPU_TO_LE_16(version[0]); partn_version.version_x = __CPU_TO_LE_16(version[1]); partn_version.version_y = __CPU_TO_LE_16(version[2]); partn_version.version_z = __CPU_TO_LE_16(version[3]); size = sizeof (partn_version) - (2 * sizeof (uint32_t)); /* Write the version number to all segments in the partition */ if ((rc = ef10_nvram_partn_write_segment_tlv(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, TLV_TAG_PARTITION_VERSION(partn), (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM typedef struct ef10_parttbl_entry_s { unsigned int partn; unsigned int port; efx_nvram_type_t nvtype; } ef10_parttbl_entry_t; /* Translate EFX NVRAM types to firmware partition types */ static ef10_parttbl_entry_t hunt_parttbl[] = { {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN}, 
{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE} }; static ef10_parttbl_entry_t medford_parttbl[] = { {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE} }; static __checkReturn efx_rc_t 
ef10_parttbl_get( __in efx_nic_t *enp, __out ef10_parttbl_entry_t **parttblp, __out size_t *parttbl_rowsp) { switch (enp->en_family) { case EFX_FAMILY_HUNTINGTON: *parttblp = hunt_parttbl; *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl); break; case EFX_FAMILY_MEDFORD: *parttblp = medford_parttbl; *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl); break; default: EFSYS_ASSERT(B_FALSE); return (EINVAL); } return (0); } __checkReturn efx_rc_t ef10_nvram_type_to_partn( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *partnp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); ef10_parttbl_entry_t *parttbl = NULL; size_t parttbl_rows = 0; unsigned int i; EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT(partnp != NULL); if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { for (i = 0; i < parttbl_rows; i++) { ef10_parttbl_entry_t *entry = &parttbl[i]; if (entry->nvtype == type && entry->port == emip->emi_port) { *partnp = entry->partn; return (0); } } } return (ENOTSUP); } #if EFSYS_OPT_DIAG static __checkReturn efx_rc_t ef10_nvram_partn_to_type( __in efx_nic_t *enp, __in uint32_t partn, __out efx_nvram_type_t *typep) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); ef10_parttbl_entry_t *parttbl = NULL; size_t parttbl_rows = 0; unsigned int i; EFSYS_ASSERT(typep != NULL); if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { for (i = 0; i < parttbl_rows; i++) { ef10_parttbl_entry_t *entry = &parttbl[i]; if (entry->partn == partn && entry->port == emip->emi_port) { *typep = entry->nvtype; return (0); } } } return (ENOTSUP); } __checkReturn efx_rc_t ef10_nvram_test( __in efx_nic_t *enp) { efx_nvram_type_t type; unsigned int npartns = 0; uint32_t *partns = NULL; size_t size; unsigned int i; efx_rc_t rc; /* Read available partitions from NVRAM partition map */ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t); EFSYS_KMEM_ALLOC(enp->en_esip, size, partns); if (partns == NULL) { rc = ENOMEM; goto fail1; } if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size, &npartns)) != 0) { goto fail2; } for (i = 0; i < npartns; i++) { /* Check if the partition is supported for this port */ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0) continue; if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0) goto fail3; } EFSYS_KMEM_FREE(enp->en_esip, size, partns); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, size, partns); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ __checkReturn efx_rc_t ef10_nvram_partn_get_version( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]) { efx_rc_t rc; /* FIXME: get highest partn version from all ports */ /* FIXME: return partn description if available */ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep, version, NULL, 0)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_rw_start( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *chunk_sizep) { efx_rc_t rc; if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0) goto fail1; if (chunk_sizep != NULL) *chunk_sizep = EF10_NVRAM_CHUNK; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_nvram_partn_rw_finish( __in efx_nic_t *enp, __in uint32_t partn) { ef10_nvram_partn_unlock(enp, partn); } #endif /* EFSYS_OPT_NVRAM */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: 
head/sys/dev/sfxge/common/efx_lic.c =================================================================== --- head/sys/dev/sfxge/common/efx_lic.c (revision 310681) +++ head/sys/dev/sfxge/common/efx_lic.c (revision 310682) @@ -1,1754 +1,1754 @@ /*- * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_LICENSING #include "ef10_tlv_layout.h" #if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON __checkReturn efx_rc_t efx_lic_v1v2_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp ); __checkReturn efx_rc_t efx_lic_v1v2_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp ); __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v1v2_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp ); __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v1v2_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, __in uint32_t length ); __checkReturn efx_rc_t efx_lic_v1v2_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, __out uint32_t *lengthp ); __checkReturn efx_rc_t efx_lic_v1v2_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp ); __checkReturn efx_rc_t efx_lic_v1v2_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, __out uint32_t *deltap ); __checkReturn efx_rc_t efx_lic_v1v2_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ); __checkReturn efx_rc_t efx_lic_v1v2_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ); #endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t efx_mcdi_fc_license_update_license( __in efx_nic_t *enp); static __checkReturn efx_rc_t efx_mcdi_fc_license_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp); static const efx_lic_ops_t __efx_lic_v1_ops = { efx_mcdi_fc_license_update_license, /* elo_update_licenses */ efx_mcdi_fc_license_get_key_stats, /* elo_get_key_stats */ NULL, /* elo_app_state */ NULL, /* elo_get_id */ efx_lic_v1v2_find_start, /* elo_find_start */ efx_lic_v1v2_find_end, /* elo_find_end */ efx_lic_v1v2_find_key, /* elo_find_key */ efx_lic_v1v2_validate_key, /* elo_validate_key */ efx_lic_v1v2_read_key, /* elo_read_key */ efx_lic_v1v2_write_key, /* elo_write_key */ efx_lic_v1v2_delete_key, /* elo_delete_key */ efx_lic_v1v2_create_partition, /* elo_create_partition */ efx_lic_v1v2_finish_partition, /* elo_finish_partition */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static __checkReturn efx_rc_t efx_mcdi_licensing_update_licenses( __in efx_nic_t *enp); static __checkReturn efx_rc_t efx_mcdi_licensing_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp); static __checkReturn efx_rc_t efx_mcdi_licensed_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp); static const efx_lic_ops_t __efx_lic_v2_ops = { efx_mcdi_licensing_update_licenses, /* elo_update_licenses */ efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */ efx_mcdi_licensed_app_state, /* elo_app_state */ NULL, /* elo_get_id */ efx_lic_v1v2_find_start, /* elo_find_start */ 
efx_lic_v1v2_find_end, /* elo_find_end */ efx_lic_v1v2_find_key, /* elo_find_key */ efx_lic_v1v2_validate_key, /* elo_validate_key */ efx_lic_v1v2_read_key, /* elo_read_key */ efx_lic_v1v2_write_key, /* elo_write_key */ efx_lic_v1v2_delete_key, /* elo_delete_key */ efx_lic_v1v2_create_partition, /* elo_create_partition */ efx_lic_v1v2_finish_partition, /* elo_finish_partition */ }; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD static __checkReturn efx_rc_t efx_mcdi_licensing_v3_update_licenses( __in efx_nic_t *enp); static __checkReturn efx_rc_t efx_mcdi_licensing_v3_report_license( __in efx_nic_t *enp, __out efx_key_stats_t *eksp); static __checkReturn efx_rc_t efx_mcdi_licensing_v3_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp); static __checkReturn efx_rc_t efx_mcdi_licensing_v3_get_id( __in efx_nic_t *enp, __in size_t buffer_size, __out uint32_t *typep, __out size_t *lengthp, __out_bcount_part_opt(buffer_size, *lengthp) uint8_t *bufferp); __checkReturn efx_rc_t efx_lic_v3_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp ); __checkReturn efx_rc_t efx_lic_v3_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp ); __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v3_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp ); __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v3_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, __in uint32_t length ); __checkReturn efx_rc_t efx_lic_v3_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, __out uint32_t *lengthp ); __checkReturn efx_rc_t efx_lic_v3_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp ); __checkReturn efx_rc_t efx_lic_v3_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, __out uint32_t *deltap ); __checkReturn efx_rc_t efx_lic_v3_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ); __checkReturn efx_rc_t efx_lic_v3_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ); static const efx_lic_ops_t __efx_lic_v3_ops = { efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */ efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */ efx_mcdi_licensing_v3_app_state, /* elo_app_state */ efx_mcdi_licensing_v3_get_id, /* elo_get_id */ efx_lic_v3_find_start, /* elo_find_start*/ efx_lic_v3_find_end, /* elo_find_end */ efx_lic_v3_find_key, /* elo_find_key */ efx_lic_v3_validate_key, /* elo_validate_key */ efx_lic_v3_read_key, /* elo_read_key */ efx_lic_v3_write_key, /* elo_write_key */ efx_lic_v3_delete_key, /* elo_delete_key */ efx_lic_v3_create_partition, /* elo_create_partition */ efx_lic_v3_finish_partition, /* elo_finish_partition */ }; #endif /* EFSYS_OPT_MEDFORD */ /* V1 Licensing - used in Siena Modena only */ #if EFSYS_OPT_SIENA static 
__checkReturn efx_rc_t efx_mcdi_fc_license_update_license( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN; req.emr_out_buf = payload; req.emr_out_length = 0; MCDI_IN_SET_DWORD(req, FC_IN_CMD, MC_CMD_FC_OP_LICENSE); MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP, MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used != 0) { rc = EIO; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fc_license_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN, MC_CMD_FC_OUT_LICENSE_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN; MCDI_IN_SET_DWORD(req, FC_IN_CMD, MC_CMD_FC_OP_LICENSE); MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP, MC_CMD_FC_IN_LICENSE_GET_KEY_STATS); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) { rc = EMSGSIZE; goto fail2; } eksp->eks_valid = MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS); eksp->eks_invalid = MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS); eksp->eks_blacklisted = MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS); eksp->eks_unverifiable = 0; eksp->eks_wrong_node = 0; eksp->eks_licensed_apps_lo = 0; eksp->eks_licensed_apps_hi = 0; eksp->eks_licensed_features_lo = 0; eksp->eks_licensed_features_hi = 0; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_SIENA */ /* V1 and V2 Partition format - based on a 16-bit TLV format */ #if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON /* * V1/V2 format - defined in SF-108542-TC section 4.2: * Type (T): 16bit - revision/HMAC algorithm * Length (L): 16bit - value length in bytes * Value (V): L bytes - payload */ #define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256) #define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof(uint16_t)) __checkReturn efx_rc_t efx_lic_v1v2_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp ) { _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) *startp = 0; return (0); } __checkReturn efx_rc_t efx_lic_v1v2_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp ) { _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH; return (0); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v1v2_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp ) { boolean_t found; uint16_t tlv_type; uint16_t tlv_length; _NOTE(ARGUNUSED(enp)) if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH) goto fail1; tlv_type = __LE_TO_CPU_16(((uint16_t*)&bufferp[offset])[0]); tlv_length = __LE_TO_CPU_16(((uint16_t*)&bufferp[offset])[1]); if 
((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) || (tlv_type == 0 && tlv_length == 0)) { found = B_FALSE; } else { *startp = offset; *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH; found = B_TRUE; } return (found); fail1: EFSYS_PROBE(fail1); return (B_FALSE); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v1v2_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, __in uint32_t length ) { uint16_t tlv_type; uint16_t tlv_length; _NOTE(ARGUNUSED(enp)) if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) { goto fail1; } tlv_type = __LE_TO_CPU_16(((uint16_t*)keyp)[0]); tlv_length = __LE_TO_CPU_16(((uint16_t*)keyp)[1]); if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) { goto fail2; } if (tlv_type == 0) { goto fail3; } if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) { goto fail4; } return (B_TRUE); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE(fail1); return (B_FALSE); } __checkReturn efx_rc_t efx_lic_v1v2_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, __out uint32_t *lengthp ) { efx_rc_t rc; _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX + EFX_LICENSE_V1V2_HEADER_LENGTH)); if (key_max_size < length) { rc = ENOSPC; goto fail1; } memcpy(keyp, &bufferp[offset], length); *lengthp = length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_v1v2_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp ) { efx_rc_t rc; _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX + EFX_LICENSE_V1V2_HEADER_LENGTH)); - // Ensure space for terminator remains + /* Ensure space for terminator remains */ if ((offset + length) > (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH)) { rc = ENOSPC; goto fail1; } memcpy(bufferp + offset, keyp, length); *lengthp = length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_v1v2_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, __out uint32_t *deltap ) { uint32_t move_start = offset + length; uint32_t move_length = end - move_start; _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT(end <= buffer_size); - // Shift everything after the key down + /* Shift everything after the key down */ memmove(bufferp + offset, bufferp + move_start, move_length); *deltap = length; return (0); } __checkReturn efx_rc_t efx_lic_v1v2_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size); - // Write terminator + /* Write terminator */ memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH); return (0); } __checkReturn efx_rc_t efx_lic_v1v2_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ) { _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) return (0); } #endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */ /* V2 Licensing - used by Huntington family only. 
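The V1/V2 helpers above walk keys laid out with the 16-bit little-endian type/length header described earlier: a zero type with a zero length is the terminator, and payloads larger than EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX are rejected. A self-contained sketch of that walk over an illustrative buffer (this is not the driver's cursor API):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HDR_LEN		4	/* 16-bit type + 16-bit length */
	#define PAYLOAD_MAX	256	/* per the V1/V2 format limit above */

	static uint16_t
	le16(const uint8_t *p)
	{
		return (uint16_t)(p[0] | (p[1] << 8));
	}

	int
	main(void)
	{
		/* Illustrative buffer: one key (type 1, 3-byte payload) + terminator */
		uint8_t buf[] = {
			0x01, 0x00, 0x03, 0x00, 'a', 'b', 'c',	/* T=1, L=3, V */
			0x00, 0x00, 0x00, 0x00			/* T=0, L=0 terminator */
		};
		size_t off = 0;

		while (off + HDR_LEN <= sizeof (buf)) {
			uint16_t type = le16(&buf[off]);
			uint16_t len = le16(&buf[off + 2]);

			if ((type == 0 && len == 0) || len > PAYLOAD_MAX)
				break;	/* terminator or invalid item */
			printf("key at %zu: type=%u length=%u\n",
			    off, (unsigned)type, (unsigned)(len + HDR_LEN));
			off += HDR_LEN + len;
		}
		return (0);
	}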
See SF-113611-TC */ #if EFSYS_OPT_HUNTINGTON static __checkReturn efx_rc_t efx_mcdi_licensed_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)]; uint32_t app_state; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); /* V2 licensing supports 32bit app id only */ if ((app_id >> 32) != 0) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN; MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID, app_id & 0xffffffff); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) { rc = EMSGSIZE; goto fail3; } app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE)); if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) { *licensedp = B_TRUE; } else { *licensedp = B_FALSE; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_update_licenses( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MC_CMD_LICENSING_IN_LEN]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LICENSING_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = 0; MCDI_IN_SET_DWORD(req, LICENSING_IN_OP, MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used != 0) { rc = EIO; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN, MC_CMD_LICENSING_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LICENSING_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_LICENSING_OUT_LEN; MCDI_IN_SET_DWORD(req, LICENSING_IN_OP, MC_CMD_LICENSING_IN_OP_GET_KEY_STATS); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) { rc = EMSGSIZE; goto fail2; } eksp->eks_valid = MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS); eksp->eks_invalid = MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS); eksp->eks_blacklisted = MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS); eksp->eks_unverifiable = MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS); eksp->eks_wrong_node = MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS); eksp->eks_licensed_apps_lo = 0; eksp->eks_licensed_apps_hi = 0; eksp->eks_licensed_features_lo = 0; eksp->eks_licensed_features_hi = 0; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_HUNTINGTON */ /* V3 Licensing - used starting from Medford family. 
See SF-114884-SW */ #if EFSYS_OPT_MEDFORD static __checkReturn efx_rc_t efx_mcdi_licensing_v3_update_licenses( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING_V3; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN; req.emr_out_buf = NULL; req.emr_out_length = 0; MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP, MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_v3_report_license( __in efx_nic_t *enp, __out efx_key_stats_t *eksp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN, MC_CMD_LICENSING_V3_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING_V3; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN; MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP, MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) { rc = EMSGSIZE; goto fail2; } eksp->eks_valid = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS); eksp->eks_invalid = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS); eksp->eks_blacklisted = 0; eksp->eks_unverifiable = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS); eksp->eks_wrong_node = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS); eksp->eks_licensed_apps_lo = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO); eksp->eks_licensed_apps_hi = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI); eksp->eks_licensed_features_lo = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO); eksp->eks_licensed_features_hi = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_v3_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN, MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)]; uint32_t app_state; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN; MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO, app_id & 0xffffffff); MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI, app_id >> 32); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) { rc = EMSGSIZE; goto fail2; } app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE)); if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) { *licensedp = B_TRUE; } else { *licensedp = B_FALSE; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t 
efx_mcdi_licensing_v3_get_id( __in efx_nic_t *enp, __in size_t buffer_size, __out uint32_t *typep, __out size_t *lengthp, __out_bcount_part_opt(buffer_size, *lengthp) uint8_t *bufferp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)]; efx_rc_t rc; req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3; if (bufferp == NULL) { /* Request id type and length only */ req.emr_in_buf = bufferp; req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN; req.emr_out_buf = bufferp; req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN; (void) memset(payload, 0, sizeof (payload)); } else { /* Request full buffer */ req.emr_in_buf = bufferp; req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN; req.emr_out_buf = bufferp; req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX); (void) memset(bufferp, 0, req.emr_out_length); } efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } *typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE); *lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH); if (bufferp == NULL) { /* modify length requirements to indicate to caller the extra buffering ** needed to read the complete output. */ *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN; } else { /* Shift ID down to start of buffer */ memmove(bufferp, bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST, *lengthp); memset(bufferp + (*lengthp), 0, MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST); } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* V3 format uses Huntington TLV format partition. 
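efx_mcdi_licensing_v3_get_id() above supports a two-pass convention: called with a NULL buffer it reports only the license ID type and the length the caller must allow for, and on the second call it returns the ID shifted down to the start of the caller's buffer. A minimal sketch of that "query the size, then fetch" pattern with a hypothetical get_id() helper (not the MCDI request itself):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical query: NULL buffer means report the required size only */
	static int
	get_id(char *buf, size_t buf_size, size_t *lengthp)
	{
		static const char id[] = "example-license-id";

		if (buf == NULL) {
			*lengthp = sizeof (id);	/* how much the caller must allocate */
			return (0);
		}
		if (buf_size < sizeof (id))
			return (-1);
		memcpy(buf, id, sizeof (id));
		*lengthp = sizeof (id);
		return (0);
	}

	int
	main(void)
	{
		size_t length = 0;
		char *buf;

		/* Pass 1: NULL buffer, learn the required length */
		if (get_id(NULL, 0, &length) != 0)
			return (1);
		/* Pass 2: allocate and fetch the full identifier */
		buf = malloc(length);
		if (buf == NULL || get_id(buf, length, &length) != 0)
			return (1);
		printf("id: %s\n", buf);
		free(buf);
		return (0);
	}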
See SF-108797-SW */ #define EFX_LICENSE_V3_KEY_LENGTH_MIN (64) #define EFX_LICENSE_V3_KEY_LENGTH_MAX (160) __checkReturn efx_rc_t efx_lic_v3_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp ) { _NOTE(ARGUNUSED(enp)) return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp); } __checkReturn efx_rc_t efx_lic_v3_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp ) { _NOTE(ARGUNUSED(enp)) return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v3_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp ) { _NOTE(ARGUNUSED(enp)) return ef10_nvram_buffer_find_item(bufferp, buffer_size, offset, startp, lengthp); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v3_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, __in uint32_t length ) { - // Check key is a valid V3 key + /* Check key is a valid V3 key */ uint8_t key_type; uint8_t key_length; _NOTE(ARGUNUSED(enp)) if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) { goto fail1; } if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) { goto fail2; } key_type = ((uint8_t*)keyp)[0]; key_length = ((uint8_t*)keyp)[1]; if (key_type < 3) { goto fail3; } if (key_length > length) { goto fail4; } return (B_TRUE); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE(fail1); return (B_FALSE); } __checkReturn efx_rc_t efx_lic_v3_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, __out uint32_t *lengthp ) { _NOTE(ARGUNUSED(enp)) return ef10_nvram_buffer_get_item(bufferp, buffer_size, offset, length, keyp, key_max_size, lengthp); } __checkReturn efx_rc_t efx_lic_v3_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp ) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX); return ef10_nvram_buffer_insert_item(bufferp, buffer_size, offset, keyp, length, lengthp); } __checkReturn efx_rc_t efx_lic_v3_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, __out uint32_t *deltap ) { efx_rc_t rc; _NOTE(ARGUNUSED(enp)) if ((rc = ef10_nvram_buffer_delete_item(bufferp, buffer_size, offset, length, end)) != 0) { goto fail1; } *deltap = length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_v3_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ) { efx_rc_t rc; - // Construct empty partition + /* Construct empty partition */ if ((rc = ef10_nvram_buffer_create(enp, NVRAM_PARTITION_TYPE_LICENSE, bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_v3_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ) { efx_rc_t rc; if ((rc = 
ef10_nvram_buffer_finish(bufferp, buffer_size)) != 0) { goto fail1; } - // Validate completed partition + /* Validate completed partition */ if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE, bufferp, buffer_size)) != 0) { goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_lic_init( __in efx_nic_t *enp) { const efx_lic_ops_t *elop; efx_key_stats_t eks; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC)); switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: elop = &__efx_lic_v1_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: elop = &__efx_lic_v2_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: elop = &__efx_lic_v3_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } enp->en_elop = elop; enp->en_mod_flags |= EFX_MOD_LIC; /* Probe for support */ if (efx_lic_get_key_stats(enp, &eks) == 0) { enp->en_licensing_supported = B_TRUE; } else { enp->en_licensing_supported = B_FALSE; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } extern __checkReturn boolean_t efx_lic_check_support( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); return enp->en_licensing_supported; } void efx_lic_fini( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); enp->en_elop = NULL; enp->en_mod_flags &= ~EFX_MOD_LIC; } __checkReturn efx_rc_t efx_lic_update_licenses( __in efx_nic_t *enp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_update_licenses(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if (elop->elo_app_state == NULL) return (ENOTSUP); if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_get_id( __in efx_nic_t *enp, __in size_t buffer_size, __out uint32_t *typep, __out size_t *lengthp, __out_opt uint8_t *bufferp ) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if (elop->elo_get_id == NULL) return (ENOTSUP); if ((rc = elop->elo_get_id(enp, buffer_size, typep, lengthp, bufferp)) != 0) goto fail1; return (0); 
fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Buffer management API - abstracts varying TLV format used for License partition */ __checkReturn efx_rc_t efx_lic_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp ) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp ) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp ) { const efx_lic_ops_t *elop = enp->en_elop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); EFSYS_ASSERT(bufferp); EFSYS_ASSERT(startp); EFSYS_ASSERT(lengthp); return (elop->elo_find_key(enp, bufferp, buffer_size, offset, startp, lengthp)); } /* Validate that the buffer contains a single key in a recognised format. ** An empty or terminator buffer is not accepted as a valid key. */ __checkReturn __success(return != B_FALSE) boolean_t efx_lic_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, __in uint32_t length ) { const efx_lic_ops_t *elop = enp->en_elop; boolean_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE) goto fail1; return (B_TRUE); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, __out uint32_t *lengthp ) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset, length, keyp, key_max_size, lengthp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp ) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset, keyp, length, lengthp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t 
buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, __out uint32_t *deltap ) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset, length, end, deltap)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_LICENSING */
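Taken together, the efx_lic_* wrappers above let a caller enumerate the keys in a license partition image without knowing whether the NIC uses the V1/V2 or V3 layout. A hedged usage sketch, assuming the partition has already been read into bufferp and that the efx.h declarations shown above are available (example_list_license_keys() is a hypothetical caller, not part of the API):

	/* Hypothetical caller: walk every key in a license partition image */
	static	__checkReturn	efx_rc_t
	example_list_license_keys(
		__in				efx_nic_t *enp,
		__in_bcount(buffer_size)	caddr_t bufferp,
		__in				size_t buffer_size)
	{
		uint32_t offset;
		uint32_t start;
		uint32_t length;
		efx_rc_t rc;

		/* Position at the first key in the buffer */
		if ((rc = efx_lic_find_start(enp, bufferp, buffer_size,
		    &offset)) != 0)
			goto fail1;

		/* Walk keys until the format-specific terminator is reached */
		while (efx_lic_find_key(enp, bufferp, buffer_size, offset,
		    &start, &length) != B_FALSE) {
			if (efx_lic_validate_key(enp, &bufferp[start],
			    length) == B_FALSE)
				break;
			/* ... consume the key at &bufferp[start], length bytes ... */
			offset = start + length;
		}

		return (0);

	fail1:
		EFSYS_PROBE1(fail1, efx_rc_t, rc);
		return (rc);
	}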