Index: stable/11/sys/dev/sfxge/common/ef10_filter.c =================================================================== --- stable/11/sys/dev/sfxge/common/ef10_filter.c (revision 342439) +++ stable/11/sys/dev/sfxge/common/ef10_filter.c (revision 342440) @@ -1,1660 +1,1689 @@ /*- * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_FILTER #define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec) static efx_filter_spec_t * ef10_filter_entry_spec( __in const ef10_filter_table_t *eftp, __in unsigned int index) { return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) & ~(uintptr_t)EFX_EF10_FILTER_FLAGS)); } static boolean_t ef10_filter_entry_is_busy( __in const ef10_filter_table_t *eftp, __in unsigned int index) { if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY) return (B_TRUE); else return (B_FALSE); } static boolean_t ef10_filter_entry_is_auto_old( __in const ef10_filter_table_t *eftp, __in unsigned int index) { if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD) return (B_TRUE); else return (B_FALSE); } static void ef10_filter_set_entry( __inout ef10_filter_table_t *eftp, __in unsigned int index, __in_opt const efx_filter_spec_t *efsp) { EFE_SPEC(eftp, index) = (uintptr_t)efsp; } static void ef10_filter_set_entry_busy( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY; } static void ef10_filter_set_entry_not_busy( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY; } static void ef10_filter_set_entry_auto_old( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL); EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD; } static void ef10_filter_set_entry_not_auto_old( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD; EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL); } __checkReturn efx_rc_t ef10_filter_init( __in efx_nic_t *enp) { efx_rc_t rc; ef10_filter_table_t *eftp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); #define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match)) EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST)); EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST == 
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST)); #undef MATCH_MASK EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp); if (!eftp) { rc = ENOMEM; goto fail1; } enp->en_filter.ef_ef10_filter_table = eftp; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_filter_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); if (enp->en_filter.ef_ef10_filter_table != NULL) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t), enp->en_filter.ef_ef10_filter_table); } } static __checkReturn efx_rc_t efx_mcdi_filter_op_add( __in efx_nic_t *enp, __in efx_filter_spec_t *spec, __in unsigned int filter_op, __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN, MC_CMD_FILTER_OP_EXT_OUT_LEN)]; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REPLACE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi); /* Fall through */ case MC_CMD_FILTER_OP_IN_OP_INSERT: case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, filter_op); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS, spec->efs_match_flags); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST, MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE, spec->efs_dmaq_id); if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) { MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_CONTEXT, spec->efs_rss_context); } MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_MODE, spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ? MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS : MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_TX_DEST, MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT); if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) { /* * NOTE: Unlike most MCDI requests, the filter fields * are presented in network (big endian) byte order. 
*/ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_MAC), spec->efs_rem_mac, EFX_MAC_ADDR_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_MAC), spec->efs_loc_mac, EFX_MAC_ADDR_LEN); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_SRC_PORT, __CPU_TO_BE_16(spec->efs_rem_port)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_DST_PORT, __CPU_TO_BE_16(spec->efs_loc_port)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_ETHER_TYPE, __CPU_TO_BE_16(spec->efs_ether_type)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_INNER_VLAN, __CPU_TO_BE_16(spec->efs_inner_vid)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_OUTER_VLAN, __CPU_TO_BE_16(spec->efs_outer_vid)); /* IP protocol (in low byte, high byte is zero) */ MCDI_IN_SET_BYTE(req, FILTER_OP_EXT_IN_IP_PROTO, spec->efs_ip_proto); EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) == MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN); EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) == MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_IP), &spec->efs_rem_host.eo_byte[0], MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_IP), &spec->efs_loc_host.eo_byte[0], MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN); /* * On Medford, filters for encapsulated packets match based on * the ether type and IP protocol in the outer frame. In * addition we need to fill in the VNI or VSID type field. */ switch (spec->efs_encap_type) { case EFX_TUNNEL_PROTOCOL_NONE: break; case EFX_TUNNEL_PROTOCOL_VXLAN: case EFX_TUNNEL_PROTOCOL_GENEVE: MCDI_IN_POPULATE_DWORD_1(req, FILTER_OP_EXT_IN_VNI_OR_VSID, FILTER_OP_EXT_IN_VNI_TYPE, spec->efs_encap_type == EFX_TUNNEL_PROTOCOL_VXLAN ? MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN : MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE); break; case EFX_TUNNEL_PROTOCOL_NVGRE: MCDI_IN_POPULATE_DWORD_1(req, FILTER_OP_EXT_IN_VNI_OR_VSID, FILTER_OP_EXT_IN_VSID_TYPE, MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail2; } } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { rc = EMSGSIZE; goto fail4; } handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO); handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_HI); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_filter_op_delete( __in efx_nic_t *enp, __in unsigned int filter_op, __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN, MC_CMD_FILTER_OP_EXT_OUT_LEN)]; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REMOVE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, MC_CMD_FILTER_OP_IN_OP_REMOVE); break; case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { rc = EMSGSIZE; goto fail3; } return (0); fail3: 
EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn boolean_t ef10_filter_equal( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { /* FIXME: Consider rx vs tx filters (look at efs_flags) */ if (left->efs_match_flags != right->efs_match_flags) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host)) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host)) return (B_FALSE); if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (left->efs_rem_port != right->efs_rem_port) return (B_FALSE); if (left->efs_loc_port != right->efs_loc_port) return (B_FALSE); if (left->efs_inner_vid != right->efs_inner_vid) return (B_FALSE); if (left->efs_outer_vid != right->efs_outer_vid) return (B_FALSE); if (left->efs_ether_type != right->efs_ether_type) return (B_FALSE); if (left->efs_ip_proto != right->efs_ip_proto) return (B_FALSE); if (left->efs_encap_type != right->efs_encap_type) return (B_FALSE); return (B_TRUE); } static __checkReturn boolean_t ef10_filter_same_dest( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) && (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_rss_context == right->efs_rss_context) return (B_TRUE); } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) && (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_dmaq_id == right->efs_dmaq_id) return (B_TRUE); } return (B_FALSE); } static __checkReturn uint32_t ef10_filter_hash( __in efx_filter_spec_t *spec) { EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t)) == 0); EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) % sizeof (uint32_t)) == 0); /* * As the area of the efx_filter_spec_t we need to hash is DWORD * aligned and an exact number of DWORDs in size we can use the * optimised efx_hash_dwords() rather than efx_hash_bytes() */ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid, (sizeof (efx_filter_spec_t) - EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) / sizeof (uint32_t), 0)); } /* * Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. Currently we decide that * filters for specific local unicast MAC and IP addresses are * exclusive. 
*/ static __checkReturn boolean_t ef10_filter_is_exclusive( __in efx_filter_spec_t *spec) { if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) && !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac)) return (B_TRUE); if ((spec->efs_match_flags & (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) && ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe)) return (B_TRUE); if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) && (spec->efs_loc_host.eo_u8[0] != 0xff)) return (B_TRUE); } return (B_FALSE); } __checkReturn efx_rc_t ef10_filter_restore( __in efx_nic_t *enp) { int tbl_id; efx_filter_spec_t *spec; ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; boolean_t restoring; efsys_lock_state_t state; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) { EFSYS_LOCK(enp->en_eslp, state); spec = ef10_filter_entry_spec(eftp, tbl_id); if (spec == NULL) { restoring = B_FALSE; } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) { /* Ignore busy entries. */ restoring = B_FALSE; } else { ef10_filter_set_entry_busy(eftp, tbl_id); restoring = B_TRUE; } EFSYS_UNLOCK(enp->en_eslp, state); if (restoring == B_FALSE) continue; if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &eftp->eft_entry[tbl_id].efe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &eftp->eft_entry[tbl_id].efe_handle); } if (rc != 0) goto fail1; EFSYS_LOCK(enp->en_eslp, state); ef10_filter_set_entry_not_busy(eftp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * An arbitrary search limit for the software hash table. As per the linux net * driver. */ #define EF10_FILTER_SEARCH_LIMIT 200 static __checkReturn efx_rc_t ef10_filter_add_internal( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace, __out_opt uint32_t *filter_id) { efx_rc_t rc; ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *saved_spec; uint32_t hash; unsigned int depth; int ins_index; boolean_t replacing = B_FALSE; unsigned int i; efsys_lock_state_t state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); #if EFSYS_OPT_RX_SCALE spec->efs_rss_context = enp->en_rss_context; #endif hash = ef10_filter_hash(spec); /* * FIXME: Add support for inserting filters of different priorities * and removing lower priority multicast filters (bug 42378) */ /* * Find any existing filters with the same match tuple or * else a free slot to insert at. If any of them are busy, * we have to wait and retry. 
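	 */

/*
 * A minimal, self-contained sketch of the linear-probing search used in the
 * loop below, assuming a small power-of-two table and using illustrative
 * example_* names (not driver API).  The real loop also handles equal-match
 * and busy entries; this shows only the free-slot probe with a bounded
 * search depth.
 */
#define	EXAMPLE_TBL_ROWS	32U	/* must be a power of two */
#define	EXAMPLE_SEARCH_LIMIT	8U

static int
example_probe(void *const *tbl, uint32_t hash)
{
	unsigned int depth;
	int ins_index = -1;

	for (depth = 1; depth <= EXAMPLE_SEARCH_LIMIT; depth++) {
		unsigned int i = (hash + depth) & (EXAMPLE_TBL_ROWS - 1);

		if (tbl[i] == NULL && ins_index < 0)
			ins_index = (int)i;	/* remember first free slot */
	}
	return (ins_index);	/* -1: no slot within the limit (EBUSY) */
}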
	for (;;) {
		ins_index = -1;
		depth = 1;
		EFSYS_LOCK(enp->en_eslp, state);
		locked = B_TRUE;

		for (;;) {
			i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
			saved_spec = ef10_filter_entry_spec(eftp, i);

			if (!saved_spec) {
				if (ins_index < 0) {
					ins_index = i;
				}
			} else if (ef10_filter_equal(spec, saved_spec)) {
				if (ef10_filter_entry_is_busy(eftp, i))
					break;
				if (saved_spec->efs_priority ==
				    EFX_FILTER_PRI_AUTO) {
					ins_index = i;
					goto found;
				} else if (ef10_filter_is_exclusive(spec)) {
					if (may_replace) {
						ins_index = i;
						goto found;
					} else {
						rc = EEXIST;
						goto fail1;
					}
				}

				/* Leave existing */
			}

			/*
			 * Once we reach the maximum search depth, use
			 * the first suitable slot or return EBUSY if
			 * there was none.
			 */
			if (depth == EF10_FILTER_SEARCH_LIMIT) {
				if (ins_index < 0) {
					rc = EBUSY;
					goto fail2;
				}
				goto found;
			}
			depth++;
		}
		EFSYS_UNLOCK(enp->en_eslp, state);
		locked = B_FALSE;
	}

found:
	/*
	 * Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = ef10_filter_entry_spec(eftp, ins_index);
	if (saved_spec) {
		if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {
			/* This is a filter we are refreshing */
			ef10_filter_set_entry_not_auto_old(eftp, ins_index);
			goto out_unlock;
		}
		replacing = B_TRUE;
	} else {
		EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);
		if (!saved_spec) {
			rc = ENOMEM;
			goto fail3;
		}
		*saved_spec = *spec;
		ef10_filter_set_entry(eftp, ins_index, saved_spec);
	}
	ef10_filter_set_entry_busy(eftp, ins_index);

	EFSYS_UNLOCK(enp->en_eslp, state);
	locked = B_FALSE;

	/*
	 * On replacing, the filter handle may change after a
	 * successful replace operation.
	 */
	if (replacing) {
		rc = efx_mcdi_filter_op_add(enp, spec,
		    MC_CMD_FILTER_OP_IN_OP_REPLACE,
		    &eftp->eft_entry[ins_index].efe_handle);
	} else if (ef10_filter_is_exclusive(spec)) {
		rc = efx_mcdi_filter_op_add(enp, spec,
		    MC_CMD_FILTER_OP_IN_OP_INSERT,
		    &eftp->eft_entry[ins_index].efe_handle);
	} else {
		rc = efx_mcdi_filter_op_add(enp, spec,
		    MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
		    &eftp->eft_entry[ins_index].efe_handle);
	}

	if (rc != 0)
		goto fail4;

	EFSYS_LOCK(enp->en_eslp, state);
	locked = B_TRUE;

	if (replacing) {
		/* Update the fields that may differ */
		saved_spec->efs_priority = spec->efs_priority;
		saved_spec->efs_flags = spec->efs_flags;
		saved_spec->efs_rss_context = spec->efs_rss_context;
		saved_spec->efs_dmaq_id = spec->efs_dmaq_id;
	}

	ef10_filter_set_entry_not_busy(eftp, ins_index);

out_unlock:
	EFSYS_UNLOCK(enp->en_eslp, state);
	locked = B_FALSE;

	if (filter_id)
		*filter_id = ins_index;

	return (0);

fail4:
	EFSYS_PROBE(fail4);

	if (!replacing) {
		EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec);
		saved_spec = NULL;
	}
	ef10_filter_set_entry_not_busy(eftp, ins_index);
	ef10_filter_set_entry(eftp, ins_index, NULL);

fail3:
	EFSYS_PROBE(fail3);

fail2:
	EFSYS_PROBE(fail2);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	if (locked)
		EFSYS_UNLOCK(enp->en_eslp, state);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_filter_add(
	__in		efx_nic_t *enp,
	__inout		efx_filter_spec_t *spec,
	__in		boolean_t may_replace)
{
	efx_rc_t rc;

	rc = ef10_filter_add_internal(enp, spec, may_replace, NULL);
	if (rc != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
ef10_filter_delete_internal(
	__in		efx_nic_t *enp,
	__in		uint32_t filter_id)
{
	efx_rc_t rc;
	ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
	efx_filter_spec_t *spec;
	efsys_lock_state_t state;
	uint32_t
filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS; /* * Find the software table entry and mark it busy. Don't * remove it yet; any attempt to update while we're waiting * for the firmware must find the busy entry. * * FIXME: What if the busy flag is never cleared? */ EFSYS_LOCK(enp->en_eslp, state); while (ef10_filter_entry_is_busy(table, filter_idx)) { EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_SPIN(1); EFSYS_LOCK(enp->en_eslp, state); } if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) { ef10_filter_set_entry_busy(table, filter_idx); } EFSYS_UNLOCK(enp->en_eslp, state); if (spec == NULL) { rc = ENOENT; goto fail1; } /* * Try to remove the hardware filter. This may fail if the MC has * rebooted (which frees all hardware filter resources). */ if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_REMOVE, &table->eft_entry[filter_idx].efe_handle); } else { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE, &table->eft_entry[filter_idx].efe_handle); } /* Free the software table entry */ EFSYS_LOCK(enp->en_eslp, state); ef10_filter_set_entry_not_busy(table, filter_idx); ef10_filter_set_entry(table, filter_idx, NULL); EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec); /* Check result of hardware filter removal */ if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { efx_rc_t rc; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *saved_spec; unsigned int hash; unsigned int depth; unsigned int i; efsys_lock_state_t state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); hash = ef10_filter_hash(spec); EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; depth = 1; for (;;) { i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1); saved_spec = ef10_filter_entry_spec(table, i); if (saved_spec && ef10_filter_equal(spec, saved_spec) && ef10_filter_same_dest(spec, saved_spec)) { break; } if (depth == EF10_FILTER_SEARCH_LIMIT) { rc = ENOENT; goto fail1; } depth++; } EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; rc = ef10_filter_delete_internal(enp, i); if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } static __checkReturn efx_rc_t efx_mcdi_get_parser_disp_info( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)]; size_t matches_count; size_t list_size; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } matches_count = MCDI_OUT_DWORD(req, GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES); if (req.emr_out_length_used < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(matches_count)) { rc = EMSGSIZE; goto fail2; } *list_lengthp = 
matches_count; if (buffer_length < matches_count) { rc = ENOSPC; goto fail3; } /* * Check that the elements in the list in the MCDI response are the size * we expect, so we can just copy them directly. Any conversion of the * flags is handled by the caller. */ EFX_STATIC_ASSERT(sizeof (uint32_t) == MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN); list_size = matches_count * MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN; memcpy(buffer, MCDI_OUT2(req, uint32_t, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES), list_size); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_filter_supported_filters( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp) { size_t mcdi_list_length; size_t list_length; uint32_t i; efx_rc_t rc; efx_filter_match_flags_t all_filter_flags = (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT | EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID | EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_UNKNOWN_MCAST_DST | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST); rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, &mcdi_list_length); if (rc != 0) { if (rc == ENOSPC) { /* Pass through mcdi_list_length for the list length */ *list_lengthp = mcdi_list_length; } goto fail1; } /* * The static assertions in ef10_filter_init() ensure that the values of * the EFX_FILTER_MATCH flags match those used by MCDI, so they don't * need to be converted. * * In case support is added to MCDI for additional flags, remove any * matches from the list which include flags we don't support. The order * of the matches is preserved as they are ordered from highest to * lowest priority. 
*/ EFSYS_ASSERT(mcdi_list_length <= buffer_length); list_length = 0; for (i = 0; i < mcdi_list_length; i++) { if ((buffer[i] & ~all_filter_flags) == 0) { buffer[list_length] = buffer[i]; list_length++; } } *list_lengthp = list_length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_unicast( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *addr, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the filter for the local station address */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); - efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); + rc = efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, + addr); + if (rc != 0) + goto fail1; rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); if (rc != 0) - goto fail1; + goto fail2; eftp->eft_unicst_filter_count++; EFSYS_ASSERT(eftp->eft_unicst_filter_count <= EFX_EF10_FILTER_UNICAST_FILTERS_MAX); return (0); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_all_unicast( __in efx_nic_t *enp, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the unknown unicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); - efx_filter_spec_set_uc_def(&spec); + rc = efx_filter_spec_set_uc_def(&spec); + if (rc != 0) + goto fail1; rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); if (rc != 0) - goto fail1; + goto fail2; eftp->eft_unicst_filter_count++; EFSYS_ASSERT(eftp->eft_unicst_filter_count <= EFX_EF10_FILTER_UNICAST_FILTERS_MAX); return (0); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_multicast_list( __in efx_nic_t *enp, __in boolean_t mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count, __in efx_filter_flags_t filter_flags, __in boolean_t rollback) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; uint8_t addr[6]; uint32_t i; uint32_t filter_index; uint32_t filter_count; efx_rc_t rc; if (mulcst == B_FALSE) count = 0; if (count + (brdcst ? 1 : 0) > EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) { /* Too many MAC addresses */ rc = EINVAL; goto fail1; } /* Insert/renew multicast address list filters */ filter_count = 0; for (i = 0; i < count; i++) { efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); - efx_filter_spec_set_eth_local(&spec, + rc = efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, &addrs[i * EFX_MAC_ADDR_LEN]); + if (rc != 0) { + if (rollback == B_TRUE) { + /* Only stop upon failure if told to rollback */ + goto rollback; + } else { + /* + * Don't try to add a filter with a corrupt + * specification. 
+ */ + continue; + } + } rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &filter_index); if (rc == 0) { eftp->eft_mulcst_filter_indexes[filter_count] = filter_index; filter_count++; } else if (rollback == B_TRUE) { /* Only stop upon failure if told to rollback */ goto rollback; } } if (brdcst == B_TRUE) { /* Insert/renew broadcast address filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); EFX_MAC_BROADCAST_ADDR_SET(addr); - efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, - addr); + rc = efx_filter_spec_set_eth_local(&spec, + EFX_FILTER_SPEC_VID_UNSPEC, addr); + if ((rc != 0) && (rollback == B_TRUE)) { + /* Only stop upon failure if told to rollback */ + goto rollback; + } rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &filter_index); if (rc == 0) { eftp->eft_mulcst_filter_indexes[filter_count] = filter_index; filter_count++; } else if (rollback == B_TRUE) { /* Only stop upon failure if told to rollback */ goto rollback; } } eftp->eft_mulcst_filter_count = filter_count; eftp->eft_using_all_mulcst = B_FALSE; return (0); rollback: /* Remove any filters we have inserted */ i = filter_count; while (i--) { (void) ef10_filter_delete_internal(enp, eftp->eft_mulcst_filter_indexes[i]); } eftp->eft_mulcst_filter_count = 0; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_all_multicast( __in efx_nic_t *enp, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the unknown multicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); - efx_filter_spec_set_mc_def(&spec); + rc = efx_filter_spec_set_mc_def(&spec); + if (rc != 0) + goto fail1; rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_mulcst_filter_indexes[0]); if (rc != 0) - goto fail1; + goto fail2; eftp->eft_mulcst_filter_count = 1; eftp->eft_using_all_mulcst = B_TRUE; /* * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic. 
*/ return (0); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } typedef struct ef10_filter_encap_entry_s { uint16_t ether_type; efx_tunnel_protocol_t encap_type; uint32_t inner_frame_match; } ef10_filter_encap_entry_t; #define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \ { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_##inner_frame_match } static ef10_filter_encap_entry_t ef10_filter_encap_list[] = { EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, MCAST_DST), }; #undef EF10_ENCAP_FILTER_ENTRY static __checkReturn efx_rc_t ef10_filter_insert_encap_filters( __in efx_nic_t *enp, __in boolean_t mulcst, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; uint32_t i; efx_rc_t rc; EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(ef10_filter_encap_list) <= EFX_ARRAY_SIZE(table->eft_encap_filter_indexes)); /* * On Medford, full-featured firmware can identify packets as being * tunnel encapsulated, even if no encapsulated packet offloads are in * use. When packets are identified as such, ordinary filters are not * applied, only ones specific to encapsulated packets. Hence we need to * insert filters for encapsulated packets in order to receive them. * * Separate filters need to be inserted for each ether type, * encapsulation type, and inner frame type (unicast or multicast). To * keep things simple and reduce the number of filters needed, catch-all * filters for all combinations of types are inserted, even if * all_unicst or all_mulcst have not been set. (These catch-all filters * may well, however, fail to insert on unprivileged functions.) */ table->eft_encap_filter_count = 0; for (i = 0; i < EFX_ARRAY_SIZE(ef10_filter_encap_list); i++) { efx_filter_spec_t spec; ef10_filter_encap_entry_t *encap_filter = &ef10_filter_encap_list[i]; /* * Skip multicast filters if we've not been asked for * any multicast traffic. 
		 */
		if ((mulcst == B_FALSE) &&
		    (encap_filter->inner_frame_match ==
		    EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST))
			continue;

		efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
		    filter_flags, table->eft_default_rxq);
		efx_filter_spec_set_ether_type(&spec, encap_filter->ether_type);
		rc = efx_filter_spec_set_encap_type(&spec,
		    encap_filter->encap_type,
		    encap_filter->inner_frame_match);
		if (rc != 0)
			goto fail1;

		rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
		    &table->eft_encap_filter_indexes[
				table->eft_encap_filter_count]);
		if (rc != 0) {
			if (rc != EACCES)
				goto fail2;
		} else {
			table->eft_encap_filter_count++;
		}
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static			void
ef10_filter_remove_old(
	__in		efx_nic_t *enp)
{
	ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
	uint32_t i;

	for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
		if (ef10_filter_entry_is_auto_old(table, i)) {
			(void) ef10_filter_delete_internal(enp, i);
		}
	}
}

static	__checkReturn	efx_rc_t
ef10_filter_get_workarounds(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
	uint32_t implemented = 0;
	uint32_t enabled = 0;
	efx_rc_t rc;

	rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled);
	if (rc == 0) {
		/* Check if chained multicast filter support is enabled */
		if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807)
			encp->enc_bug26807_workaround = B_TRUE;
		else
			encp->enc_bug26807_workaround = B_FALSE;
	} else if (rc == ENOTSUP) {
		/*
		 * Firmware is too old to support GET_WORKAROUNDS, and support
		 * for this workaround was implemented later.
		 */
		encp->enc_bug26807_workaround = B_FALSE;
	} else {
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * Reconfigure all filters.
 * If all_unicst and/or all_mulcst filters cannot be applied then
 * return ENOTSUP (Note the filters for the specified addresses are
 * still applied in this case).
 */
	__checkReturn	efx_rc_t
ef10_filter_reconfigure(
	__in				efx_nic_t *enp,
	__in_ecount(6)			uint8_t const *mac_addr,
	__in				boolean_t all_unicst,
	__in				boolean_t mulcst,
	__in				boolean_t all_mulcst,
	__in				boolean_t brdcst,
	__in_ecount(6*count)		uint8_t const *addrs,
	__in				uint32_t count)
{
	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
	ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
	efx_filter_flags_t filter_flags;
	unsigned int i;
	efx_rc_t all_unicst_rc = 0;
	efx_rc_t all_mulcst_rc = 0;
	efx_rc_t rc;

	if (table->eft_default_rxq == NULL) {
		/*
		 * Filters direct traffic to the default RXQ, and so cannot be
		 * inserted until it is available.  Any currently configured
		 * filters must be removed (ignore errors in case the MC
		 * has rebooted, which removes hardware filters).
		 */
		for (i = 0; i < table->eft_unicst_filter_count; i++) {
			(void) ef10_filter_delete_internal(enp,
					table->eft_unicst_filter_indexes[i]);
		}
		table->eft_unicst_filter_count = 0;

		for (i = 0; i < table->eft_mulcst_filter_count; i++) {
			(void) ef10_filter_delete_internal(enp,
					table->eft_mulcst_filter_indexes[i]);
		}
		table->eft_mulcst_filter_count = 0;

		for (i = 0; i < table->eft_encap_filter_count; i++) {
			(void) ef10_filter_delete_internal(enp,
					table->eft_encap_filter_indexes[i]);
		}
		table->eft_encap_filter_count = 0;

		return (0);
	}

	if (table->eft_using_rss)
		filter_flags = EFX_FILTER_FLAG_RX_RSS;
	else
		filter_flags = 0;

	/* Mark old filters which may need to be removed */
	for (i = 0; i < table->eft_unicst_filter_count; i++) {
		ef10_filter_set_entry_auto_old(table,
					table->eft_unicst_filter_indexes[i]);
	}
	for (i = 0; i < table->eft_mulcst_filter_count; i++) {
		ef10_filter_set_entry_auto_old(table,
					table->eft_mulcst_filter_indexes[i]);
	}
	for (i = 0; i < table->eft_encap_filter_count; i++) {
		ef10_filter_set_entry_auto_old(table,
					table->eft_encap_filter_indexes[i]);
	}

	/*
	 * Insert or renew unicast filters.
	 *
	 * Firmware does not perform chaining on unicast filters.  As traffic
	 * is therefore only delivered to the first matching filter, we should
	 * always insert the specific filter for our MAC address, to try and
	 * ensure we get that traffic.
	 *
	 * (If the filter for our MAC address has already been inserted by
	 * another function, we won't receive traffic sent to us, even if we
	 * insert a unicast mismatch filter.  To prevent traffic stealing, this
	 * therefore relies on the privilege model only allowing functions to
	 * insert filters for their own MAC address unless explicitly given
	 * additional privileges by the user.  This also means that, even on a
	 * privileged function, inserting a unicast mismatch filter may not
	 * catch all traffic in multi PCI function scenarios.)
	 */
	table->eft_unicst_filter_count = 0;
	rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);
	if (all_unicst || (rc != 0)) {
		all_unicst_rc = ef10_filter_insert_all_unicast(enp,
						    filter_flags);
		if ((rc != 0) && (all_unicst_rc != 0))
			goto fail1;
	}

	/*
	 * WORKAROUND_BUG26807 controls firmware support for chained multicast
	 * filters, and can only be enabled or disabled when the hardware
	 * filter table is empty.
	 *
	 * Chained multicast filters require support from the datapath
	 * firmware, and may not be available (e.g. low-latency variants or
	 * old Huntington firmware).
	 *
	 * Firmware will reset (FLR) functions which have inserted filters in
	 * the hardware filter table when the workaround is enabled/disabled.
	 * Functions without any hardware filters are not reset.
	 *
	 * Re-check if the workaround is enabled after adding unicast hardware
	 * filters.  This ensures that encp->enc_bug26807_workaround matches
	 * the firmware state, and that later changes to enable/disable the
	 * workaround will result in this function seeing a reset (FLR).
	 *
	 * In common-code drivers, we only support multiple PCI function
	 * scenarios with firmware that supports multicast chaining, so we can
	 * assume it is enabled for such cases and hence simplify the filter
	 * insertion logic.  Firmware that does not support multicast chaining
	 * does not support multiple PCI function configurations either, so
	 * filter insertion is much simpler and the same strategies can still
	 * be used.
	 */
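/*
 * A minimal sketch of the mark/renew/sweep idiom this function relies on:
 * every installed entry is marked AUTO_OLD above, re-insertion clears the
 * mark, and ef10_filter_remove_old() later deletes whatever is still marked.
 * The example_* names are illustrative only; wanted[] is assumed to hold
 * valid table indexes.
 */
struct example_entry {
	boolean_t	ee_in_use;
	boolean_t	ee_old;
};

static void
example_mark_renew_sweep(struct example_entry *tbl, size_t rows,
    const unsigned int *wanted, size_t nwanted)
{
	size_t i;

	for (i = 0; i < rows; i++) {		/* mark */
		if (tbl[i].ee_in_use)
			tbl[i].ee_old = B_TRUE;
	}
	for (i = 0; i < nwanted; i++)		/* renew: clear the mark */
		tbl[wanted[i]].ee_old = B_FALSE;
	for (i = 0; i < rows; i++) {		/* sweep stale entries */
		if (tbl[i].ee_in_use && tbl[i].ee_old)
			tbl[i].ee_in_use = B_FALSE;
	}
}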
*/ if ((rc = ef10_filter_get_workarounds(enp)) != 0) goto fail2; if ((table->eft_using_all_mulcst != all_mulcst) && (encp->enc_bug26807_workaround == B_TRUE)) { /* * Multicast filter chaining is enabled, so traffic that matches * more than one multicast filter will be replicated and * delivered to multiple recipients. To avoid this duplicate * delivery, remove old multicast filters before inserting new * multicast filters. */ ef10_filter_remove_old(enp); } /* Insert or renew multicast filters */ if (all_mulcst == B_TRUE) { /* * Insert the all multicast filter. If that fails, try to insert * all of our multicast filters (but without rollback on * failure). */ all_mulcst_rc = ef10_filter_insert_all_multicast(enp, filter_flags); if (all_mulcst_rc != 0) { rc = ef10_filter_insert_multicast_list(enp, B_TRUE, brdcst, addrs, count, filter_flags, B_FALSE); if (rc != 0) goto fail3; } } else { /* * Insert filters for multicast addresses. * If any insertion fails, then rollback and try to insert the * all multicast filter instead. * If that also fails, try to insert all of the multicast * filters (but without rollback on failure). */ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, addrs, count, filter_flags, B_TRUE); if (rc != 0) { if ((table->eft_using_all_mulcst == B_FALSE) && (encp->enc_bug26807_workaround == B_TRUE)) { /* * Multicast filter chaining is on, so remove * old filters before inserting the multicast * all filter to avoid duplicate delivery caused * by packets matching multiple filters. */ ef10_filter_remove_old(enp); } rc = ef10_filter_insert_all_multicast(enp, filter_flags); if (rc != 0) { rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, addrs, count, filter_flags, B_FALSE); if (rc != 0) goto fail4; } } } if (encp->enc_tunnel_encapsulations_supported != 0) { /* Try to insert filters for encapsulated packets. 
*/ (void) ef10_filter_insert_encap_filters(enp, mulcst || all_mulcst || brdcst, filter_flags); } /* Remove old filters which were not renewed */ ef10_filter_remove_old(enp); /* report if any optional flags were rejected */ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) || ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) { rc = ENOTSUP; } return (rc); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Clear auto old flags */ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { if (ef10_filter_entry_is_auto_old(table, i)) { ef10_filter_set_entry_not_auto_old(table, i); } } return (rc); } void ef10_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; *erpp = table->eft_default_rxq; *using_rss = table->eft_using_rss; } void ef10_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; #if EFSYS_OPT_RX_SCALE EFSYS_ASSERT((using_rss == B_FALSE) || (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID)); table->eft_using_rss = using_rss; #else EFSYS_ASSERT(using_rss == B_FALSE); table->eft_using_rss = B_FALSE; #endif table->eft_default_rxq = erp; } void ef10_filter_default_rxq_clear( __in efx_nic_t *enp) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; table->eft_default_rxq = NULL; table->eft_using_rss = B_FALSE; } #endif /* EFSYS_OPT_FILTER */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: stable/11/sys/dev/sfxge/common/ef10_mac.c =================================================================== --- stable/11/sys/dev/sfxge/common/ef10_mac.c (revision 342439) +++ stable/11/sys/dev/sfxge/common/ef10_mac.c (revision 342440) @@ -1,900 +1,900 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD __checkReturn efx_rc_t ef10_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep) { efx_port_t *epp = &(enp->en_port); ef10_link_state_t els; efx_rc_t rc; if ((rc = ef10_phy_get_link(enp, &els)) != 0) goto fail1; epp->ep_adv_cap_mask = els.els_adv_cap_mask; epp->ep_fcntl = els.els_fcntl; *link_modep = els.els_link_mode; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); *link_modep = EFX_LINK_UNKNOWN; return (rc); } __checkReturn efx_rc_t ef10_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp) { ef10_link_state_t els; efx_rc_t rc; /* * Because EF10 doesn't *require* polling, we can't rely on * ef10_mac_poll() being executed to populate epp->ep_mac_up. */ if ((rc = ef10_phy_get_link(enp, &els)) != 0) goto fail1; *mac_upp = els.els_mac_up; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the * MAC address; the address field in MC_CMD_SET_MAC has no * effect. * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and * the port to have no filters or queues active. */ static __checkReturn efx_rc_t efx_mcdi_vadapter_set_mac( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN, MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN; MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, enp->en_vport_id); EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR), epp->ep_mac_addr); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_mac_addr_set( __in efx_nic_t *enp) { efx_rc_t rc; if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) { if (rc != ENOTSUP) goto fail1; /* * Fallback for older Huntington firmware without Vadapter * support. 
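		 */

/*
 * A minimal sketch of the ENOTSUP-fallback idiom used here: try the newer
 * MCDI operation first and fall back to the legacy path only when the
 * firmware reports ENOTSUP.  The example_* helpers are illustrative
 * stand-ins, not driver API.
 */
static efx_rc_t example_new_op(efx_nic_t *enp);
static efx_rc_t example_legacy_op(efx_nic_t *enp);

static efx_rc_t
example_set_with_fallback(efx_nic_t *enp)
{
	efx_rc_t rc;

	if ((rc = example_new_op(enp)) != 0) {
		if (rc != ENOTSUP)
			return (rc);		/* genuine failure */
		rc = example_legacy_op(enp);	/* older firmware path */
	}
	return (rc);
}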
		if ((rc = ef10_mac_reconfigure(enp)) != 0)
			goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_mtu_set(
	__in		efx_nic_t *enp,
	__in		uint32_t mtu)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
			    MC_CMD_SET_MAC_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_MAC;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;

	/* Only configure the MTU in this call to MC_CMD_SET_MAC */
	MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu);
	MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL,
	    SET_MAC_EXT_IN_CFG_MTU, 1);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_mtu_get(
	__in		efx_nic_t *enp,
	__out		size_t *mtu)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
			    MC_CMD_SET_MAC_V2_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_MAC;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN;

	/*
	 * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the
	 * MTU.  This should always be supported on Medford, but it is not
	 * supported on older Huntington firmware.
	 */
	MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) {
		rc = EMSGSIZE;
		goto fail2;
	}

	*mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_mac_pdu_set(
	__in		efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_rc_t rc;

	if (encp->enc_enhanced_set_mac_supported) {
		if ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0)
			goto fail1;
	} else {
		/*
		 * Fallback for older Huntington firmware, which always
		 * configures all of the parameters to MC_CMD_SET_MAC.  This
		 * isn't suitable for setting the MTU on unprivileged
		 * functions.
		 */
		if ((rc = ef10_mac_reconfigure(enp)) != 0)
			goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_mac_pdu_get(
	__in		efx_nic_t *enp,
	__out		size_t *pdu)
{
	efx_rc_t rc;

	if ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_mac_reconfigure(
	__in		efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
			    MC_CMD_SET_MAC_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_MAC;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
	MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
	EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
			    epp->ep_mac_addr);

	/*
	 * Note: The Huntington MAC does not support REJECT_BRDCST.
	 * The REJECT_UNCST flag will also prevent multicast traffic
	 * from reaching the filters.
As Huntington filters drop any * traffic that does not match a filter it is ok to leave the * MAC running in promiscuous mode. See bug41141. * * FIXME: Does REJECT_UNCST behave the same way on Medford? */ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT, SET_MAC_IN_REJECT_UNCST, 0, SET_MAC_IN_REJECT_BRDCST, 0); /* * Flow control, whether it is auto-negotiated or not, * is set via the PHY advertised capabilities. When set to * automatic the MAC will use the PHY settings to determine * the flow control settings. */ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO); /* Do not include the Ethernet frame checksum in RX packets */ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS, SET_MAC_IN_FLAG_INCLUDE_FCS, 0); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { /* * Unprivileged functions cannot control link state, * but still need to configure filters. */ if (req.emr_rc != EACCES) { rc = req.emr_rc; goto fail1; } } /* * Apply the filters for the MAC configuration. * If the NIC isn't ready to accept filters this may * return success without setting anything. */ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr, epp->ep_all_unicst, epp->ep_mulcst, epp->ep_all_mulcst, epp->ep_brdcst, epp->ep_mulcst_addr_list, epp->ep_mulcst_addr_count); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_mac_multicast_list_set( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { efx_port_t *epp = &(enp->en_port); efx_rxq_t *old_rxq; boolean_t old_using_rss; efx_rc_t rc; ef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss); ef10_filter_default_rxq_set(enp, erp, using_rss); rc = efx_filter_reconfigure(enp, epp->ep_mac_addr, epp->ep_all_unicst, epp->ep_mulcst, epp->ep_all_mulcst, epp->ep_brdcst, epp->ep_mulcst_addr_list, epp->ep_mulcst_addr_count); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); ef10_filter_default_rxq_set(enp, old_rxq, old_using_rss); return (rc); } void ef10_mac_filter_default_rxq_clear( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); ef10_filter_default_rxq_clear(enp); - efx_filter_reconfigure(enp, epp->ep_mac_addr, + (void) efx_filter_reconfigure(enp, epp->ep_mac_addr, epp->ep_all_unicst, epp->ep_mulcst, epp->ep_all_mulcst, epp->ep_brdcst, epp->ep_mulcst_addr_list, epp->ep_mulcst_addr_count); } #if EFSYS_OPT_LOOPBACK __checkReturn efx_rc_t ef10_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type) { efx_port_t *epp = &(enp->en_port); const efx_phy_ops_t *epop = epp->ep_epop; efx_loopback_type_t old_loopback_type; efx_link_mode_t old_loopback_link_mode; efx_rc_t rc; /* The PHY object handles this on EF10 */ old_loopback_type = epp->ep_loopback_type; old_loopback_link_mode = epp->ep_loopback_link_mode; epp->ep_loopback_type = loopback_type; epp->ep_loopback_link_mode = link_mode; if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); epp->ep_loopback_type = old_loopback_type; epp->ep_loopback_link_mode = old_loopback_link_mode; return (rc); } #endif /* EFSYS_OPT_LOOPBACK */ #if 
EFSYS_OPT_MAC_STATS __checkReturn efx_rc_t ef10_mac_stats_get_mask( __in efx_nic_t *enp, __inout_bcount(mask_size) uint32_t *maskp, __in size_t mask_size) { const struct efx_mac_stats_range ef10_common[] = { { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS }, { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS }, { EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_JABBER_PKTS }, { EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_PAUSE_PKTS }, }; const struct efx_mac_stats_range ef10_tx_size_bins[] = { { EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_GE_15XX_PKTS }, }; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_port_t *epp = &(enp->en_port); efx_rc_t rc; if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, ef10_common, EFX_ARRAY_SIZE(ef10_common))) != 0) goto fail1; if (epp->ep_phy_cap_mask & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { const struct efx_mac_stats_range ef10_40g_extra[] = { { EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_ALIGN_ERRORS }, }; if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, ef10_40g_extra, EFX_ARRAY_SIZE(ef10_40g_extra))) != 0) goto fail2; if (encp->enc_mac_stats_40g_tx_size_bins) { if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, ef10_tx_size_bins, EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0) goto fail3; } } else { if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, ef10_tx_size_bins, EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0) goto fail4; } if (encp->enc_pm_and_rxdp_counters) { const struct efx_mac_stats_range ef10_pm_and_rxdp[] = { { EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_RXDP_HLB_WAIT }, }; if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, ef10_pm_and_rxdp, EFX_ARRAY_SIZE(ef10_pm_and_rxdp))) != 0) goto fail5; } if (encp->enc_datapath_cap_evb) { const struct efx_mac_stats_range ef10_vadaptor[] = { { EFX_MAC_VADAPTER_RX_UNICAST_PACKETS, EFX_MAC_VADAPTER_TX_OVERFLOW }, }; if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, ef10_vadaptor, EFX_ARRAY_SIZE(ef10_vadaptor))) != 0) goto fail6; } return (0); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #define EF10_MAC_STAT_READ(_esmp, _field, _eqp) \ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp) __checkReturn efx_rc_t ef10_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp) { efx_qword_t value; efx_qword_t generation_start; efx_qword_t generation_end; _NOTE(ARGUNUSED(enp)) /* Read END first so we don't race with the MC */ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END, &generation_end); EFSYS_MEM_READ_BARRIER(); /* TX */ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value); EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value); 
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value); /* RX */ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value); EF10_MAC_STAT_READ(esmp,
MC_CMD_MAC_RX_512_TO_1023_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]), &(value.eq_dword[1])); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]), &(value.eq_dword[1])); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]), &(value.eq_dword[1])); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]), &(value.eq_dword[1])); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value); /* Packet memory (EF10 only) */ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_BB_OVERFLOW]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_BB_OVERFLOW]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_VFIFO_FULL, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_VFIFO_FULL]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_VFIFO_FULL, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_VFIFO_FULL]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_QBB, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_QBB]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_QBB, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_QBB]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_MAPPING, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_MAPPING]), &value); /* RX datapath */ 
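/*
 * Note (added for exposition): the RX datapath counters below, and the
 * packet memory counters above, are only exposed via the stats mask when
 * the firmware reports PM_AND_RXDP_COUNTERS; see the
 * enc_pm_and_rxdp_counters check in ef10_mac_stats_get_mask() above.
 */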
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_Q_DISABLED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_Q_DISABLED_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_DI_DROPPED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_DI_DROPPED_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_STREAMING_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_STREAMING_PKTS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_FETCH]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_WAIT]), &value); /* VADAPTER RX */ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_BYTES]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_PACKETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_BYTES]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_OVERFLOW, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_OVERFLOW]), &value); /* VADAPTER TX */ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_BYTES]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_PACKETS]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_BYTES]), &value); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_OVERFLOW, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value); EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); EFSYS_MEM_READ_BARRIER(); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, &generation_start); /* Check that we didn't read the stats in the 
middle of a DMA */ /* Not a good enough check ? */ if (memcmp(&generation_start, &generation_end, sizeof (generation_start))) return (EAGAIN); if (generationp) *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); return (0); } #endif /* EFSYS_OPT_MAC_STATS */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: stable/11/sys/dev/sfxge/common/ef10_nic.c =================================================================== --- stable/11/sys/dev/sfxge/common/ef10_nic.c (revision 342439) +++ stable/11/sys/dev/sfxge/common/ef10_nic.c (revision 342440) @@ -1,1835 +1,1835 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_MCDI #include "mcdi_mon.h" #endif #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #include "ef10_tlv_layout.h" __checkReturn efx_rc_t efx_mcdi_get_port_assignment( __in efx_nic_t *enp, __out uint32_t *portp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_port_modes( __in efx_nic_t *enp, __out uint32_t *modesp, __out_opt uint32_t *current_modep) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN, MC_CMD_GET_PORT_MODES_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PORT_MODES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } /* * Require only Modes and DefaultMode fields, unless the current mode * was requested (CurrentMode field was added for Medford). 
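 * (The length checks below enforce this: the response must always reach
 * CURRENT_MODE_OFST, and when the caller asked for the current mode it
 * must also contain the 4-byte CurrentMode field itself.)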
*/ if (req.emr_out_length_used < MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) { rc = EMSGSIZE; goto fail2; } if ((current_modep != NULL) && (req.emr_out_length_used < MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) { rc = EMSGSIZE; goto fail3; } *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES); if (current_modep != NULL) { *current_modep = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_CURRENT_MODE); } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nic_get_port_mode_bandwidth( __in uint32_t port_mode, __out uint32_t *bandwidth_mbpsp) { uint32_t bandwidth; efx_rc_t rc; switch (port_mode) { case TLV_PORT_MODE_10G: bandwidth = 10000; break; case TLV_PORT_MODE_10G_10G: bandwidth = 10000 * 2; break; case TLV_PORT_MODE_10G_10G_10G_10G: case TLV_PORT_MODE_10G_10G_10G_10G_Q: case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2: case TLV_PORT_MODE_10G_10G_10G_10G_Q2: bandwidth = 10000 * 4; break; case TLV_PORT_MODE_40G: bandwidth = 40000; break; case TLV_PORT_MODE_40G_40G: bandwidth = 40000 * 2; break; case TLV_PORT_MODE_40G_10G_10G: case TLV_PORT_MODE_10G_10G_40G: bandwidth = 40000 + (10000 * 2); break; default: rc = EINVAL; goto fail1; } *bandwidth_mbpsp = bandwidth; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_vadaptor_alloc( __in efx_nic_t *enp, __in uint32_t port_id) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN, MC_CMD_VADAPTOR_ALLOC_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VADAPTOR_ALLOC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN; MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS, VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED, enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 
1 : 0); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_vadaptor_free( __in efx_nic_t *enp, __in uint32_t port_id) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN, MC_CMD_VADAPTOR_FREE_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VADAPTOR_FREE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN; MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_mac_address_pf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) { rc = ENOENT; goto fail3; } if (mac_addrp != NULL) { uint8_t *addrp; addrp = MCDI_OUT2(req, uint8_t, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE); EFX_MAC_ADDR_COPY(mac_addrp, addrp); } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_mac_address_vf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX; MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, EVB_PORT_ID_ASSIGNED); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } if (MCDI_OUT_DWORD(req, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) { rc = ENOENT; goto fail3; } if (mac_addrp != NULL) { uint8_t *addrp; addrp = MCDI_OUT2(req, uint8_t, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR); EFX_MAC_ADDR_COPY(mac_addrp, addrp); } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_clock( __in efx_nic_t *enp, __out uint32_t *sys_freqp, __out uint32_t *dpcpu_freqp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN, MC_CMD_GET_CLOCK_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); (void) 
memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_CLOCK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ); if (*sys_freqp == 0) { rc = EINVAL; goto fail3; } *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ); if (*dpcpu_freqp == 0) { rc = EINVAL; goto fail4; } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_vector_cfg( __in efx_nic_t *enp, __out_opt uint32_t *vec_basep, __out_opt uint32_t *pf_nvecp, __out_opt uint32_t *vf_nvecp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN, MC_CMD_GET_VECTOR_CFG_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_VECTOR_CFG; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (vec_basep != NULL) *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE); if (pf_nvecp != NULL) *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF); if (vf_nvecp != NULL) *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_get_capabilities( __in efx_nic_t *enp, __out uint32_t *flagsp, __out uint32_t *flags2p, __out uint32_t *tso2ncp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_CAPABILITIES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1); if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { *flags2p = 0; *tso2ncp = 0; } else { *flags2p = MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2); *tso2ncp = MCDI_OUT_WORD(req, GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS); } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_alloc_vis( __in efx_nic_t *enp, __in uint32_t min_vi_count, __in uint32_t max_vi_count, __out uint32_t *vi_basep, __out uint32_t *vi_countp, __out uint32_t *vi_shiftp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN, MC_CMD_ALLOC_VIS_EXT_OUT_LEN)]; efx_rc_t rc; if (vi_countp == NULL) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_ALLOC_VIS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN; MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, 
min_vi_count); MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) { rc = EMSGSIZE; goto fail3; } *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE); *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT); /* Report VI_SHIFT if available (always zero for Huntington) */ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN) *vi_shiftp = 0; else *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_free_vis( __in efx_nic_t *enp) { efx_mcdi_req_t req; efx_rc_t rc; EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0); EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0); req.emr_cmd = MC_CMD_FREE_VIS; req.emr_in_buf = NULL; req.emr_in_length = 0; req.emr_out_buf = NULL; req.emr_out_length = 0; efx_mcdi_execute_quiet(enp, &req); /* Ignore EALREADY (no allocated VIs, so nothing to free) */ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_alloc_piobuf( __in efx_nic_t *enp, __out efx_piobuf_handle_t *handlep) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN, MC_CMD_ALLOC_PIOBUF_OUT_LEN)]; efx_rc_t rc; if (handlep == NULL) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_ALLOC_PIOBUF; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN; efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { rc = EMSGSIZE; goto fail3; } *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_free_piobuf( __in efx_nic_t *enp, __in efx_piobuf_handle_t handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN, MC_CMD_FREE_PIOBUF_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FREE_PIOBUF; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN; MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_link_piobuf( __in efx_nic_t *enp, __in uint32_t vi_index, __in efx_piobuf_handle_t handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN, MC_CMD_LINK_PIOBUF_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LINK_PIOBUF; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN; MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle); MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); }
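/*
 * Illustrative sketch (added for exposition, not part of this change):
 * how the piobuf helpers are typically combined. A client sub-allocates
 * a block with ef10_nic_pio_alloc() and links the owning piobuf to a
 * TxQ's VI with ef10_nic_pio_link() (both defined later in this file).
 * The function name is hypothetical; it is kept under #if 0 so it does
 * not affect the build.
 */
#if 0
static	__checkReturn	efx_rc_t
example_pio_setup(
	__in		efx_nic_t *enp,
	__in		uint32_t vi_index)
{
	efx_piobuf_handle_t handle;
	uint32_t bufnum;
	uint32_t blknum;
	uint32_t offset;
	size_t size;
	efx_rc_t rc;

	/* Reserve a sub-allocated block from one of the piobufs */
	if ((rc = ef10_nic_pio_alloc(enp, &bufnum, &handle, &blknum,
	    &offset, &size)) != 0)
		goto fail1;

	/* Link the piobuf to the VI so that PIO writes can reach it */
	if ((rc = ef10_nic_pio_link(enp, vi_index, handle)) != 0) {
		(void) ef10_nic_pio_free(enp, bufnum, blknum);
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}
#endif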
static __checkReturn efx_rc_t efx_mcdi_unlink_piobuf( __in efx_nic_t *enp, __in uint32_t vi_index) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN, MC_CMD_UNLINK_PIOBUF_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_UNLINK_PIOBUF; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN; MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static void ef10_nic_alloc_piobufs( __in efx_nic_t *enp, __in uint32_t max_piobuf_count) { efx_piobuf_handle_t *handlep; unsigned int i; EFSYS_ASSERT3U(max_piobuf_count, <=, EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle)); enp->en_arch.ef10.ena_piobuf_count = 0; for (i = 0; i < max_piobuf_count; i++) { handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; if (efx_mcdi_alloc_piobuf(enp, handlep) != 0) goto fail1; enp->en_arch.ef10.ena_pio_alloc_map[i] = 0; enp->en_arch.ef10.ena_piobuf_count++; } return; fail1: for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; - efx_mcdi_free_piobuf(enp, *handlep); + (void) efx_mcdi_free_piobuf(enp, *handlep); *handlep = EFX_PIOBUF_HANDLE_INVALID; } enp->en_arch.ef10.ena_piobuf_count = 0; } static void ef10_nic_free_piobufs( __in efx_nic_t *enp) { efx_piobuf_handle_t *handlep; unsigned int i; for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; - efx_mcdi_free_piobuf(enp, *handlep); + (void) efx_mcdi_free_piobuf(enp, *handlep); *handlep = EFX_PIOBUF_HANDLE_INVALID; } enp->en_arch.ef10.ena_piobuf_count = 0; } /* Sub-allocate a block from a piobuf */ __checkReturn efx_rc_t ef10_nic_pio_alloc( __inout efx_nic_t *enp, __out uint32_t *bufnump, __out efx_piobuf_handle_t *handlep, __out uint32_t *blknump, __out uint32_t *offsetp, __out size_t *sizep) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; efx_drv_cfg_t *edcp = &enp->en_drv_cfg; uint32_t blk_per_buf; uint32_t buf, blk; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); EFSYS_ASSERT(bufnump); EFSYS_ASSERT(handlep); EFSYS_ASSERT(blknump); EFSYS_ASSERT(offsetp); EFSYS_ASSERT(sizep); if ((edcp->edc_pio_alloc_size == 0) || (enp->en_arch.ef10.ena_piobuf_count == 0)) { rc = ENOMEM; goto fail1; } blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size; for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) { uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf]; if (~(*map) == 0) continue; EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map))); for (blk = 0; blk < blk_per_buf; blk++) { if ((*map & (1u << blk)) == 0) { *map |= (1u << blk); goto done; } } } rc = ENOMEM; goto fail2; done: *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf]; *bufnump = buf; *blknump = blk; *sizep = edcp->edc_pio_alloc_size; *offsetp = blk * (*sizep); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Free a piobuf sub-allocated block */ __checkReturn efx_rc_t ef10_nic_pio_free( __inout efx_nic_t *enp, __in uint32_t bufnum, __in uint32_t blknum) { uint32_t *map; efx_rc_t rc; if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) || (blknum >= (8 * sizeof (*map)))) { rc = EINVAL; goto fail1; } map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum]; if 
((*map & (1u << blknum)) == 0) { rc = ENOENT; goto fail2; } *map &= ~(1u << blknum); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nic_pio_link( __inout efx_nic_t *enp, __in uint32_t vi_index, __in efx_piobuf_handle_t handle) { return (efx_mcdi_link_piobuf(enp, vi_index, handle)); } __checkReturn efx_rc_t ef10_nic_pio_unlink( __inout efx_nic_t *enp, __in uint32_t vi_index) { return (efx_mcdi_unlink_piobuf(enp, vi_index)); } static __checkReturn efx_rc_t ef10_mcdi_get_pf_count( __in efx_nic_t *enp, __out uint32_t *pf_countp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN, MC_CMD_GET_PF_COUNT_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PF_COUNT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *pf_countp = *MCDI_OUT(req, uint8_t, MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST); EFSYS_ASSERT(*pf_countp != 0); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_get_datapath_caps( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t flags; uint32_t flags2; uint32_t tso2nc; efx_rc_t rc; if ((rc = efx_mcdi_get_capabilities(enp, &flags, &flags2, &tso2nc)) != 0) goto fail1; if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0) goto fail1; #define CAP_FLAG(flags1, field) \ ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN))) #define CAP_FLAG2(flags2, field) \ ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN))) /* * Huntington RXDP firmware inserts a 0 or 14 byte prefix. * We only support the 14 byte prefix here. */ if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) { rc = ENOTSUP; goto fail2; } encp->enc_rx_prefix_size = 14; /* Check if the firmware supports TSO */ encp->enc_fw_assisted_tso_enabled = CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE; /* Check if the firmware supports FATSOv2 */ encp->enc_fw_assisted_tso_v2_enabled = CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE; /* Get the number of TSO contexts (FATSOv2) */ encp->enc_fw_assisted_tso_v2_n_contexts = CAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0; /* Check if the firmware has vadapter/vport/vswitch support */ encp->enc_datapath_cap_evb = CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE; /* Check if the firmware supports VLAN insertion */ encp->enc_hw_tx_insert_vlan_enabled = CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE; /* Check if the firmware supports RX event batching */ encp->enc_rx_batching_enabled = CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE; /* * Even if batching isn't reported as supported, we may still get * batched events. */ encp->enc_rx_batch_max = 16; /* Check if the firmware supports disabling scatter on RXQs */ encp->enc_rx_disable_scatter_supported = CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE; /* Check if the firmware supports set mac with running filters */ encp->enc_allow_set_mac_with_installed_filters = CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ? B_TRUE : B_FALSE; /* * Check if firmware supports the extended MC_CMD_SET_MAC, which allows * specifying which parameters to configure. 
*/ encp->enc_enhanced_set_mac_supported = CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE; /* * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows * us to let the firmware choose the settings to use on an EVQ. */ encp->enc_init_evq_v2_supported = CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE; /* * Check if firmware-verified NVRAM updates must be used. * * The firmware trusted installer requires all NVRAM updates to use * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update) * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated * partition and report the result). */ encp->enc_fw_verified_nvram_update_required = CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ? B_TRUE : B_FALSE; /* * Check if firmware provides packet memory and Rx datapath * counters. */ encp->enc_pm_and_rxdp_counters = CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE; /* * Check if the 40G MAC hardware is capable of reporting * statistics for Tx size bins. */ encp->enc_mac_stats_40g_tx_size_bins = CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE; /* * Check if firmware supports VXLAN and NVGRE tunnels. * The capability indicates Geneve protocol support as well. */ if (CAP_FLAG(flags, VXLAN_NVGRE)) encp->enc_tunnel_encapsulations_supported = (1u << EFX_TUNNEL_PROTOCOL_VXLAN) | (1u << EFX_TUNNEL_PROTOCOL_GENEVE) | (1u << EFX_TUNNEL_PROTOCOL_NVGRE); #undef CAP_FLAG #undef CAP_FLAG2 return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #define EF10_LEGACY_PF_PRIVILEGE_MASK \ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS) #define EF10_LEGACY_VF_PRIVILEGE_MASK 0 __checkReturn efx_rc_t ef10_get_privilege_mask( __in efx_nic_t *enp, __out uint32_t *maskp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t mask; efx_rc_t rc; if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf, &mask)) != 0) { if (rc != ENOTSUP) goto fail1; /* Fallback for old firmware without privilege mask support */ if (EFX_PCI_FUNCTION_IS_PF(encp)) { /* Assume PF has admin privilege */ mask = EF10_LEGACY_PF_PRIVILEGE_MASK; } else { /* VF is always unprivileged by default */ mask = EF10_LEGACY_VF_PRIVILEGE_MASK; } } *maskp = mask; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Table of mapping schemes from port number to the number of the external * connector on the board. The external numbering does not distinguish * off-board separated outputs such as from multi-headed cables. * * The count of adjacent port numbers that map to each external port, * and the offset in the numbering, are determined by the chip family and * current port mode. * * For the Huntington family, the current port mode cannot be discovered, * so the mapping used is instead the last match in the table to the full * set of port modes to which the NIC can be configured. Therefore the * ordering of entries in the mapping table is significant.
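 * For example, with a count of 2 and an offset of 1 (the Huntington 40G
 * entries below), the mapping computed as (port / count) + offset sends
 * ports 0 and 1 to external connector 1 and ports 2 and 3 to external
 * connector 2.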
*/ static struct { efx_family_t family; uint32_t modes_mask; int32_t count; int32_t offset; } __ef10_external_port_mappings[] = { /* Supported modes with 1 output per external port */ { EFX_FAMILY_HUNTINGTON, (1 << TLV_PORT_MODE_10G) | (1 << TLV_PORT_MODE_10G_10G) | (1 << TLV_PORT_MODE_10G_10G_10G_10G), 1, 1 }, { EFX_FAMILY_MEDFORD, (1 << TLV_PORT_MODE_10G) | (1 << TLV_PORT_MODE_10G_10G), 1, 1 }, /* Supported modes with 2 outputs per external port */ { EFX_FAMILY_HUNTINGTON, (1 << TLV_PORT_MODE_40G) | (1 << TLV_PORT_MODE_40G_40G) | (1 << TLV_PORT_MODE_40G_10G_10G) | (1 << TLV_PORT_MODE_10G_10G_40G), 2, 1 }, { EFX_FAMILY_MEDFORD, (1 << TLV_PORT_MODE_40G) | (1 << TLV_PORT_MODE_40G_40G) | (1 << TLV_PORT_MODE_40G_10G_10G) | (1 << TLV_PORT_MODE_10G_10G_40G) | (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), 2, 1 }, /* Supported modes with 4 outputs per external port */ { EFX_FAMILY_MEDFORD, (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) | (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1), 4, 1, }, { EFX_FAMILY_MEDFORD, (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2), 4, 2 }, }; __checkReturn efx_rc_t ef10_external_port_mapping( __in efx_nic_t *enp, __in uint32_t port, __out uint8_t *external_portp) { efx_rc_t rc; int i; uint32_t port_modes; uint32_t matches; uint32_t current; int32_t count = 1; /* Default 1-1 mapping */ int32_t offset = 1; /* Default starting external port number */ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) { /* * No current port mode information * - infer mapping from available modes */ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) { /* * No port mode information available * - use default mapping */ goto out; } } else { /* Only need to scan the current mode */ port_modes = 1 << current; } /* * Infer the internal port -> external port mapping from * the possible port modes for this NIC. */ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) { if (__ef10_external_port_mappings[i].family != enp->en_family) continue; matches = (__ef10_external_port_mappings[i].modes_mask & port_modes); if (matches != 0) { count = __ef10_external_port_mappings[i].count; offset = __ef10_external_port_mappings[i].offset; port_modes &= ~matches; } } if (port_modes != 0) { /* Some advertised modes are not supported */ rc = ENOTSUP; goto fail1; } out: /* * Scale as required by last matched mode and then convert to * correctly offset numbering */ *external_portp = (uint8_t)((port / count) + offset); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nic_probe( __in efx_nic_t *enp) { const efx_nic_ops_t *enop = enp->en_enop; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); /* Read and clear any assertion state */ if ((rc = efx_mcdi_read_assertion(enp)) != 0) goto fail1; /* Exit the assertion handler */ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) if (rc != EACCES) goto fail2; if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) goto fail3; if ((rc = enop->eno_board_cfg(enp)) != 0) if (rc != EACCES) goto fail4; /* * Set default driver config limits (based on board config). * * FIXME: For now allocate a fixed number of VIs which is likely to be * sufficient and small enough to allow multiple functions on the same * port.
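 * (With the fixed allocation below this makes edc_min_vi_count and
 * edc_max_vi_count both equal to MIN(128, MAX(enc_rxq_limit,
 * enc_txq_limit)).)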
*/ edcp->edc_min_vi_count = edcp->edc_max_vi_count = MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit)); /* The client driver must configure and enable PIO buffer support */ edcp->edc_max_piobuf_count = 0; edcp->edc_pio_alloc_size = 0; #if EFSYS_OPT_MAC_STATS /* Wipe the MAC statistics */ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0) goto fail5; #endif #if EFSYS_OPT_LOOPBACK if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0) goto fail6; #endif #if EFSYS_OPT_MON_STATS if ((rc = mcdi_mon_cfg_build(enp)) != 0) { /* Unprivileged functions do not have access to sensors */ if (rc != EACCES) goto fail7; } #endif encp->enc_features = enp->en_features; return (0); #if EFSYS_OPT_MON_STATS fail7: EFSYS_PROBE(fail7); #endif #if EFSYS_OPT_LOOPBACK fail6: EFSYS_PROBE(fail6); #endif #if EFSYS_OPT_MAC_STATS fail5: EFSYS_PROBE(fail5); #endif fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); uint32_t min_evq_count, max_evq_count; uint32_t min_rxq_count, max_rxq_count; uint32_t min_txq_count, max_txq_count; efx_rc_t rc; if (edlp == NULL) { rc = EINVAL; goto fail1; } /* Get minimum required and maximum usable VI limits */ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit); min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit); min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit); edcp->edc_min_vi_count = MAX(min_evq_count, MAX(min_rxq_count, min_txq_count)); max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit); max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit); max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit); edcp->edc_max_vi_count = MAX(max_evq_count, MAX(max_rxq_count, max_txq_count)); /* * Check limits for sub-allocated piobuf blocks. * PIO is optional, so don't fail if the limits are incorrect. */ if ((encp->enc_piobuf_size == 0) || (encp->enc_piobuf_limit == 0) || (edlp->edl_min_pio_alloc_size == 0) || (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) { /* Disable PIO */ edcp->edc_max_piobuf_count = 0; edcp->edc_pio_alloc_size = 0; } else { uint32_t blk_size, blk_count, blks_per_piobuf; blk_size = MAX(edlp->edl_min_pio_alloc_size, encp->enc_piobuf_min_alloc_size); blks_per_piobuf = encp->enc_piobuf_size / blk_size; EFSYS_ASSERT3U(blks_per_piobuf, <=, 32); blk_count = (encp->enc_piobuf_limit * blks_per_piobuf); /* A zero max pio alloc count means unlimited */ if ((edlp->edl_max_pio_alloc_count > 0) && (edlp->edl_max_pio_alloc_count < blk_count)) { blk_count = edlp->edl_max_pio_alloc_count; } edcp->edc_pio_alloc_size = blk_size; edcp->edc_max_piobuf_count = (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nic_reset( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN, MC_CMD_ENTITY_RESET_OUT_LEN)]; efx_rc_t rc; /* ef10_nic_reset() is called to recover from BADASSERT failures. 
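 * As in ef10_nic_probe(), the assertion state is read and cleared and the
 * assertion handler is exited before MC_CMD_ENTITY_RESET is issued.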
*/ if ((rc = efx_mcdi_read_assertion(enp)) != 0) goto fail1; if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) goto fail2; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_ENTITY_RESET; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN; MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG, ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } /* Clear RX/TX DMA queue errors */ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nic_init( __in efx_nic_t *enp) { efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); uint32_t min_vi_count, max_vi_count; uint32_t vi_count, vi_base, vi_shift; uint32_t i; uint32_t retry; uint32_t delay_us; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); /* Enable reporting of some events (e.g. link change) */ if ((rc = efx_mcdi_log_ctrl(enp)) != 0) goto fail1; /* Allocate (optional) on-chip PIO buffers */ ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count); /* * For best performance, PIO writes should use a write-combined * (WC) memory mapping. Using a separate WC mapping for the PIO * aperture of each VI would be a burden to drivers (and not * possible if the host page size is >4Kbyte). * * To avoid this we use a single uncached (UC) mapping for VI * register access, and a single WC mapping for extra VIs used * for PIO writes. * * Each piobuf must be linked to a VI in the WC mapping, and to * each VI that is using a sub-allocated block from the piobuf. */ min_vi_count = edcp->edc_min_vi_count; max_vi_count = edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count; /* Ensure that the previously attached driver's VIs are freed */ if ((rc = efx_mcdi_free_vis(enp)) != 0) goto fail2; /* * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this * fails then retrying the request for fewer VI resources may succeed. 
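 * (The allocation below asks for between min_vi_count and max_vi_count
 * VIs; any grant of at least the minimum is accepted, and a smaller grant
 * fails with ENOMEM.)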
*/ vi_count = 0; if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count, &vi_base, &vi_count, &vi_shift)) != 0) goto fail3; EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count); if (vi_count < min_vi_count) { rc = ENOMEM; goto fail4; } enp->en_arch.ef10.ena_vi_base = vi_base; enp->en_arch.ef10.ena_vi_count = vi_count; enp->en_arch.ef10.ena_vi_shift = vi_shift; if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) { /* Not enough extra VIs to map piobufs */ ef10_nic_free_piobufs(enp); } enp->en_arch.ef10.ena_pio_write_vi_base = vi_count - enp->en_arch.ef10.ena_piobuf_count; /* Save UC memory mapping details */ enp->en_arch.ef10.ena_uc_mem_map_offset = 0; if (enp->en_arch.ef10.ena_piobuf_count > 0) { enp->en_arch.ef10.ena_uc_mem_map_size = (ER_DZ_TX_PIOBUF_STEP * enp->en_arch.ef10.ena_pio_write_vi_base); } else { enp->en_arch.ef10.ena_uc_mem_map_size = (ER_DZ_TX_PIOBUF_STEP * enp->en_arch.ef10.ena_vi_count); } /* Save WC memory mapping details */ enp->en_arch.ef10.ena_wc_mem_map_offset = enp->en_arch.ef10.ena_uc_mem_map_offset + enp->en_arch.ef10.ena_uc_mem_map_size; enp->en_arch.ef10.ena_wc_mem_map_size = (ER_DZ_TX_PIOBUF_STEP * enp->en_arch.ef10.ena_piobuf_count); /* Link piobufs to extra VIs in WC mapping */ if (enp->en_arch.ef10.ena_piobuf_count > 0) { for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { rc = efx_mcdi_link_piobuf(enp, enp->en_arch.ef10.ena_pio_write_vi_base + i, enp->en_arch.ef10.ena_piobuf_handle[i]); if (rc != 0) break; } } /* * Allocate a vAdaptor attached to our upstream vPort/pPort. * * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF * driver has yet to bring up the EVB port. See bug 56147. In this case, * retry the request several times after waiting a while. The wait time * between retries starts small (10ms) and exponentially increases. * Total wait time is a little over two seconds. Retry logic in the * client driver may mean this whole loop is repeated if it continues to * fail. */ retry = 0; delay_us = 10000; while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) { if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) || (rc != ENOENT)) { /* * Do not retry alloc for PF, or for other errors on * a VF. */ goto fail5; } /* VF startup before PF is ready. Retry allocation. */ if (retry > 5) { /* Too many attempts */ rc = EINVAL; goto fail6; } EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry); EFSYS_SLEEP(delay_us); retry++; if (delay_us < 500000) delay_us <<= 2; } enp->en_vport_id = EVB_PORT_ID_ASSIGNED; enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2; return (0); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); ef10_nic_free_piobufs(enp); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *vi_countp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); /* * Report VIs that the client driver can use. * Do not include VIs used for PIO buffer writes. 
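 * (ena_pio_write_vi_base is exactly the number of client-usable VIs,
 * since the VIs used for PIO writes are placed at the top of the
 * allocated VI range.)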
*/ *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base; return (0); } __checkReturn efx_rc_t ef10_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep) { efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); /* * TODO: Specify host memory mapping alignment and granularity * in efx_drv_limits_t so that they can be taken into account * when allocating extra VIs for PIO writes. */ switch (region) { case EFX_REGION_VI: /* UC mapped memory BAR region for VI registers */ *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset; *sizep = enp->en_arch.ef10.ena_uc_mem_map_size; break; case EFX_REGION_PIO_WRITE_VI: /* WC mapped memory BAR region for piobuf writes */ *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset; *sizep = enp->en_arch.ef10.ena_wc_mem_map_size; break; default: rc = EINVAL; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_nic_fini( __in efx_nic_t *enp) { uint32_t i; efx_rc_t rc; (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id); enp->en_vport_id = 0; /* Unlink piobufs from extra VIs in WC mapping */ if (enp->en_arch.ef10.ena_piobuf_count > 0) { for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { rc = efx_mcdi_unlink_piobuf(enp, enp->en_arch.ef10.ena_pio_write_vi_base + i); if (rc != 0) break; } } ef10_nic_free_piobufs(enp); (void) efx_mcdi_free_vis(enp); enp->en_arch.ef10.ena_vi_count = 0; } void ef10_nic_unprobe( __in efx_nic_t *enp) { #if EFSYS_OPT_MON_STATS mcdi_mon_cfg_free(enp); #endif /* EFSYS_OPT_MON_STATS */ (void) efx_mcdi_drv_attach(enp, B_FALSE); } #if EFSYS_OPT_DIAG __checkReturn efx_rc_t ef10_nic_register_test( __in efx_nic_t *enp) { efx_rc_t rc; /* FIXME */ _NOTE(ARGUNUSED(enp)) _NOTE(CONSTANTCONDITION) if (B_FALSE) { rc = ENOTSUP; goto fail1; } /* FIXME */ return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: stable/11/sys/dev/sfxge/common/ef10_nvram.c =================================================================== --- stable/11/sys/dev/sfxge/common/ef10_nvram.c (revision 342439) +++ stable/11/sys/dev/sfxge/common/ef10_nvram.c (revision 342440) @@ -1,2391 +1,2391 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM #include "ef10_tlv_layout.h" /* Cursor for TLV partition format */ typedef struct tlv_cursor_s { uint32_t *block; /* Base of data block */ uint32_t *current; /* Cursor position */ uint32_t *end; /* End tag position */ uint32_t *limit; /* Last dword of data block */ } tlv_cursor_t; typedef struct nvram_partition_s { uint16_t type; uint8_t chip_select; uint8_t flags; /* * The full length of the NVRAM partition. * This is different from tlv_partition_header.total_length, * which can be smaller. */ uint32_t length; uint32_t erase_size; uint32_t *data; tlv_cursor_t tlv_cursor; } nvram_partition_t; static __checkReturn efx_rc_t tlv_validate_state( __inout tlv_cursor_t *cursor); static void tlv_init_block( __out uint32_t *block) { *block = __CPU_TO_LE_32(TLV_TAG_END); } static uint32_t tlv_tag( __in tlv_cursor_t *cursor) { uint32_t dword, tag; dword = cursor->current[0]; tag = __LE_TO_CPU_32(dword); return (tag); } static size_t tlv_length( __in tlv_cursor_t *cursor) { uint32_t dword, length; if (tlv_tag(cursor) == TLV_TAG_END) return (0); dword = cursor->current[1]; length = __LE_TO_CPU_32(dword); return ((size_t)length); } static uint8_t * tlv_value( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)(&cursor->current[2])); } static uint8_t * tlv_item( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)cursor->current); } /* * TLV item DWORD length is tag + length + value (rounded up to DWORD) * equivalent to tlv_n_words_for_len in mc-comms tlv.c */ #define TLV_DWORD_COUNT(length) \ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t))) static uint32_t * tlv_next_item_ptr( __in tlv_cursor_t *cursor) { uint32_t length; length = tlv_length(cursor); return (cursor->current + TLV_DWORD_COUNT(length)); } static __checkReturn efx_rc_t tlv_advance( __inout tlv_cursor_t *cursor) { efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (cursor->current == cursor->end) { /* No more tags after END tag */ cursor->current = NULL; rc = ENOENT; goto fail2; } /* Advance to next item and validate */ cursor->current = tlv_next_item_ptr(cursor); if ((rc = tlv_validate_state(cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_rewind( __in tlv_cursor_t *cursor) { efx_rc_t rc; cursor->current = cursor->block; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_find( __inout tlv_cursor_t *cursor, 
__in uint32_t tag) { efx_rc_t rc; rc = tlv_rewind(cursor); while (rc == 0) { if (tlv_tag(cursor) == tag) break; rc = tlv_advance(cursor); } return (rc); } static __checkReturn efx_rc_t tlv_validate_state( __inout tlv_cursor_t *cursor) { efx_rc_t rc; /* Check cursor position */ if (cursor->current < cursor->block) { rc = EINVAL; goto fail1; } if (cursor->current > cursor->limit) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != TLV_TAG_END) { /* Check current item has space for tag and length */ if (cursor->current > (cursor->limit - 2)) { cursor->current = NULL; rc = EFAULT; goto fail3; } /* Check we have value data for current item and another tag */ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) { cursor->current = NULL; rc = EFAULT; goto fail4; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_init_cursor( __out tlv_cursor_t *cursor, __in uint32_t *block, __in uint32_t *limit, __in uint32_t *current) { cursor->block = block; cursor->limit = limit; cursor->current = current; cursor->end = NULL; return (tlv_validate_state(cursor)); } static __checkReturn efx_rc_t tlv_init_cursor_from_size( __out tlv_cursor_t *cursor, __in_bcount(size) uint8_t *block, __in size_t size) { uint32_t *limit; limit = (uint32_t *)(block + size - sizeof (uint32_t)); return (tlv_init_cursor(cursor, (uint32_t *)block, limit, (uint32_t *)block)); } static __checkReturn efx_rc_t tlv_init_cursor_at_offset( __out tlv_cursor_t *cursor, __in_bcount(size) uint8_t *block, __in size_t size, __in size_t offset) { uint32_t *limit; uint32_t *current; limit = (uint32_t *)(block + size - sizeof (uint32_t)); current = (uint32_t *)(block + offset); return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current)); } static __checkReturn efx_rc_t tlv_require_end( __inout tlv_cursor_t *cursor) { uint32_t *pos; efx_rc_t rc; if (cursor->end == NULL) { pos = cursor->current; if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0) goto fail1; cursor->end = cursor->current; cursor->current = pos; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static size_t tlv_block_length_used( __inout tlv_cursor_t *cursor) { efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; /* Return space used (including the END tag) */ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } static uint32_t * tlv_last_segment_end( __in tlv_cursor_t *cursor) { tlv_cursor_t segment_cursor; uint32_t *last_segment_end = cursor->block; uint32_t *segment_start = cursor->block; /* * Go through each segment and check that it has an end tag. If there * is no end tag then the previous segment was the last valid one, * so return the pointer to its end tag. 
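 * (Segments are laid end to end: each is a TLV chain terminated by an END
 * tag, and the next segment starts at the dword immediately after the
 * previous segment's END tag.)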
*/ for (;;) { if (tlv_init_cursor(&segment_cursor, segment_start, cursor->limit, segment_start) != 0) break; if (tlv_require_end(&segment_cursor) != 0) break; last_segment_end = segment_cursor.end; segment_start = segment_cursor.end + 1; } return (last_segment_end); } static uint32_t * tlv_write( __in tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t len = size; uint32_t *ptr; ptr = cursor->current; *ptr++ = __CPU_TO_LE_32(tag); *ptr++ = __CPU_TO_LE_32(len); if (len > 0) { ptr[(len - 1) / sizeof (uint32_t)] = 0; memcpy(ptr, data, len); ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr); } return (ptr); } static __checkReturn efx_rc_t tlv_insert( __inout tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; if (tag == TLV_TAG_END) { rc = EINVAL; goto fail3; } last_segment_end = tlv_last_segment_end(cursor); delta = TLV_DWORD_COUNT(size); if (last_segment_end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail4; } /* Move data up: new space at cursor->current */ memmove(cursor->current + delta, cursor->current, (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; /* Write new TLV item */ tlv_write(cursor, tag, data, size); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t tlv_delete( __inout tlv_cursor_t *cursor) { unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } delta = TLV_DWORD_COUNT(tlv_length(cursor)); if ((rc = tlv_require_end(cursor)) != 0) goto fail3; last_segment_end = tlv_last_segment_end(cursor); /* Shuffle things down, destroying the item at cursor->current */ memmove(cursor->current, cursor->current + delta, (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t tlv_modify( __inout tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t *pos; unsigned int old_ndwords; unsigned int new_ndwords; unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != tag) { rc = EINVAL; goto fail3; } old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor)); new_ndwords = TLV_DWORD_COUNT(size); if ((rc = tlv_require_end(cursor)) != 0) goto fail4; last_segment_end = tlv_last_segment_end(cursor); if (new_ndwords > old_ndwords) { /* Expand space used for TLV item */ delta = new_ndwords - old_ndwords; pos = cursor->current + old_ndwords; if (last_segment_end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail5; } /* Move up: new space at (cursor->current + old_ndwords) */ memmove(pos + delta, pos, (last_segment_end + 1 - pos) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; } else 
if (new_ndwords < old_ndwords) { /* Shrink space used for TLV item */ delta = old_ndwords - new_ndwords; pos = cursor->current + new_ndwords; /* Move down: remove words at (cursor->current + new_ndwords) */ memmove(pos, pos + delta, (last_segment_end + 1 - pos) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; } /* Write new data */ tlv_write(cursor, tag, data, size); return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint32_t checksum_tlv_partition( __in nvram_partition_t *partition) { tlv_cursor_t *cursor; uint32_t *ptr; uint32_t *end; uint32_t csum; size_t len; cursor = &partition->tlv_cursor; len = tlv_block_length_used(cursor); EFSYS_ASSERT3U((len & 3), ==, 0); csum = 0; ptr = partition->data; end = &ptr[len >> 2]; while (ptr < end) csum += __LE_TO_CPU_32(*ptr++); return (csum); } static __checkReturn efx_rc_t tlv_update_partition_len_and_cks( __in tlv_cursor_t *cursor) { efx_rc_t rc; nvram_partition_t partition; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t new_len; /* * We just modified the partition, so the total length may not be * valid. Don't use tlv_find(), which performs some sanity checks * that may fail here. */ partition.data = cursor->block; memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor)); header = (struct tlv_partition_header *)partition.data; /* Sanity check. */ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) { rc = EFAULT; goto fail1; } new_len = tlv_block_length_used(&partition.tlv_cursor); if (new_len == 0) { rc = EFAULT; goto fail2; } header->total_length = __CPU_TO_LE_32(new_len); /* Ensure the modified partition always has a new generation count. 
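 * Readers compare the header and trailer generation fields to detect a
 * partition modified mid-read (see the EAGAIN handling in
 * ef10_nvram_read_tlv_segment() below), so every successful update must
 * bump the count.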
*/ header->generation = __CPU_TO_LE_32( __LE_TO_CPU_32(header->generation) + 1); trailer = (struct tlv_partition_trailer *)((uint8_t *)header + new_len - sizeof (*trailer) - sizeof (uint32_t)); trailer->generation = header->generation; trailer->checksum = __CPU_TO_LE_32( __LE_TO_CPU_32(trailer->checksum) - checksum_tlv_partition(&partition)); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Validate buffer contents (before writing to flash) */ __checkReturn efx_rc_t ef10_nvram_buffer_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; efx_rc_t rc; _NOTE(ARGUNUSED(enp, partn)) EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); if ((partn_data == NULL) || (partn_size == 0)) { rc = EINVAL; goto fail1; } /* The partition header must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data, partn_size)) != 0) { rc = EFAULT; goto fail2; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail3; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV partition length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > partn_size) { rc = EFBIG; goto fail4; } /* Check partition ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail5; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail6; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail7; } /* Check generation counts are consistent */ if (trailer->generation != header->generation) { rc = EINVAL; goto fail8; } /* Verify partition checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(partn_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail9; } return (0); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_create( __in efx_nic_t *enp, __in uint16_t partn_type, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { uint32_t *buf = (uint32_t *)partn_data; efx_rc_t rc; tlv_cursor_t cursor; struct tlv_partition_header header; struct tlv_partition_trailer trailer; unsigned int min_buf_size = sizeof (struct tlv_partition_header) + sizeof (struct tlv_partition_trailer); if (partn_size < min_buf_size) { rc = EINVAL; goto fail1; } memset(buf, 0xff, partn_size); tlv_init_block(buf); if ((rc = tlv_init_cursor(&cursor, buf, (uint32_t *)((uint8_t *)buf + partn_size), buf)) != 0) { goto fail2; } header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER); header.length = __CPU_TO_LE_32(sizeof (header) - 8); header.type_id = __CPU_TO_LE_16(partn_type); header.preset = 0; header.generation = __CPU_TO_LE_32(1); header.total_length = 0; /* This will be fixed below. 
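 * tlv_update_partition_len_and_cks() recomputes it once the header and
 * trailer items have been inserted below.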
*/ if ((rc = tlv_insert( &cursor, TLV_TAG_PARTITION_HEADER, (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0) goto fail3; if ((rc = tlv_advance(&cursor)) != 0) goto fail4; trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER); trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8); trailer.generation = header.generation; trailer.checksum = 0; /* This will be fixed below. */ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER, (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0) goto fail5; if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) goto fail6; /* Check that the partition is valid. */ if ((rc = ef10_nvram_buffer_validate(enp, partn_type, partn_data, partn_size)) != 0) goto fail7; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint32_t byte_offset( __in uint32_t *position, __in uint32_t *base) { return (uint32_t)((uint8_t *)position - (uint8_t *)base); } __checkReturn efx_rc_t ef10_nvram_buffer_find_item_start( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp) { /* Read past partition header to find start address of the first key */ tlv_cursor_t cursor; efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail3; } *startp = byte_offset(cursor.current, cursor.block); if ((rc = tlv_require_end(&cursor)) != 0) goto fail4; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_find_end( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp) { /* Read to end of partition */ tlv_cursor_t cursor; efx_rc_t rc; uint32_t *segment_used; _NOTE(ARGUNUSED(offset)) if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } segment_used = cursor.block; /* * Go through each segment and check that it has an end tag. If there * is no end tag then the previous segment was the last valid one, * so return the used space including that end tag. */ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { if (tlv_require_end(&cursor) != 0) { if (segment_used == cursor.block) { /* * First segment is corrupt, so there is * no valid data in partition. 
*/ rc = EINVAL; goto fail2; } break; } segment_used = cursor.end + 1; cursor.current = segment_used; } /* Return space used (including the END tag) */ *endp = (segment_used - cursor.block) * sizeof (uint32_t); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn __success(return != B_FALSE) boolean_t ef10_nvram_buffer_find_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp) { /* Find TLV at offset and return key start and length */ tlv_cursor_t cursor; uint8_t *key; uint32_t tag; if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset) != 0) { return (B_FALSE); } while ((key = tlv_item(&cursor)) != NULL) { tag = tlv_tag(&cursor); if (tag == TLV_TAG_PARTITION_HEADER || tag == TLV_TAG_PARTITION_TRAILER) { if (tlv_advance(&cursor) != 0) { break; } continue; } *startp = byte_offset(cursor.current, cursor.block); *lengthp = byte_offset(tlv_next_item_ptr(&cursor), cursor.current); return (B_TRUE); } return (B_FALSE); } __checkReturn efx_rc_t ef10_nvram_buffer_get_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(item_max_size, *lengthp) caddr_t itemp, __in size_t item_max_size, __out uint32_t *lengthp) { efx_rc_t rc; tlv_cursor_t cursor; uint32_t item_length; if (item_max_size < length) { rc = ENOSPC; goto fail1; } if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail2; } item_length = tlv_length(&cursor); if (length < item_length) { rc = ENOSPC; goto fail3; } memcpy(itemp, tlv_value(&cursor), item_length); *lengthp = item_length; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_insert_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp) { efx_rc_t rc; tlv_cursor_t cursor; if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail1; } rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length); if (rc != 0) { goto fail2; } *lengthp = byte_offset(tlv_next_item_ptr(&cursor), cursor.current); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_delete_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end) { efx_rc_t rc; tlv_cursor_t cursor; _NOTE(ARGUNUSED(length, end)) if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail1; } if ((rc = tlv_delete(&cursor)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_finish( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size) { efx_rc_t rc; tlv_cursor_t cursor; if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } if ((rc = tlv_require_end(&cursor)) != 0) goto fail2; if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read and validate a 
segment from a partition. A segment is a complete TLV chain
 * delimited by PARTITION_HEADER and END tags, with a PARTITION_TRAILER as
 * the last item before the END tag. There may be multiple segments in a
 * partition, so seg_offset allows segments beyond the first to be read.
 */
static	__checkReturn		efx_rc_t
ef10_nvram_read_tlv_segment(
	__in			efx_nic_t *enp,
	__in			uint32_t partn,
	__in			size_t seg_offset,
	__in_bcount(max_seg_size)	caddr_t seg_data,
	__in			size_t max_seg_size)
{
	tlv_cursor_t cursor;
	struct tlv_partition_header *header;
	struct tlv_partition_trailer *trailer;
	size_t total_length;
	uint32_t cksum;
	int pos;
	efx_rc_t rc;

	EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);

	if ((seg_data == NULL) || (max_seg_size == 0)) {
		rc = EINVAL;
		goto fail1;
	}

	/* Read initial chunk of the segment, starting at offset */
	if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data,
		    EF10_NVRAM_CHUNK,
		    MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) {
		goto fail2;
	}

	/* A PARTITION_HEADER tag must be the first item at the given offset */
	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
		    max_seg_size)) != 0) {
		rc = EFAULT;
		goto fail3;
	}
	if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
		rc = EINVAL;
		goto fail4;
	}
	header = (struct tlv_partition_header *)tlv_item(&cursor);

	/* Check TLV segment length (includes the END tag) */
	total_length = __LE_TO_CPU_32(header->total_length);
	if (total_length > max_seg_size) {
		rc = EFBIG;
		goto fail5;
	}

	/* Read the remaining segment content */
	if (total_length > EF10_NVRAM_CHUNK) {
		if ((rc = ef10_nvram_partn_read_mode(enp, partn,
			    seg_offset + EF10_NVRAM_CHUNK,
			    seg_data + EF10_NVRAM_CHUNK,
			    total_length - EF10_NVRAM_CHUNK,
			    MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0)
			goto fail6;
	}

	/* Check segment ends with PARTITION_TRAILER and END tags */
	if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
		rc = EINVAL;
		goto fail7;
	}
	trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);

	if ((rc = tlv_advance(&cursor)) != 0) {
		rc = EINVAL;
		goto fail8;
	}
	if (tlv_tag(&cursor) != TLV_TAG_END) {
		rc = EINVAL;
		goto fail9;
	}

	/* Check data read from segment is consistent */
	if (trailer->generation != header->generation) {
		/*
		 * The partition data may have been modified between successive
		 * MCDI NVRAM_READ requests by the MC or another PCI function.
		 *
		 * The caller must retry to obtain consistent partition data.
		 */
		rc = EAGAIN;
		goto fail10;
	}

	/* Verify segment checksum */
	cksum = 0;
	for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
		cksum += *((uint32_t *)(seg_data + pos));
	}
	if (cksum != 0) {
		rc = EINVAL;
		goto fail11;
	}

	return (0);

fail11:
	EFSYS_PROBE(fail11);
fail10:
	EFSYS_PROBE(fail10);
fail9:
	EFSYS_PROBE(fail9);
fail8:
	EFSYS_PROBE(fail8);
fail7:
	EFSYS_PROBE(fail7);
fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * Read a single TLV item from a host memory
 * buffer containing a TLV formatted segment.
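 *
 * On success, *datap points at a buffer allocated here with
 * EFSYS_KMEM_ALLOC (or is NULL for a zero-length item) and ownership
 * passes to the caller. A minimal caller sketch (hypothetical, for
 * illustration only):
 *
 *	caddr_t data;
 *	size_t size;
 *
 *	if (ef10_nvram_buf_read_tlv(enp, seg_data, seg_size, tag,
 *	    &data, &size) == 0 && data != NULL) {
 *		... use data ...
 *		EFSYS_KMEM_FREE(enp->en_esip, size, data);
 *	}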
*/ __checkReturn efx_rc_t ef10_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep) { tlv_cursor_t cursor; caddr_t data; size_t length; caddr_t value; efx_rc_t rc; _NOTE(ARGUNUSED(enp)) if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Find requested TLV tag in segment data */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail2; } if ((rc = tlv_find(&cursor, tag)) != 0) { rc = ENOENT; goto fail3; } value = (caddr_t)tlv_value(&cursor); length = tlv_length(&cursor); if (length == 0) data = NULL; else { /* Copy out data from TLV item */ EFSYS_KMEM_ALLOC(enp->en_esip, length, data); if (data == NULL) { rc = ENOMEM; goto fail4; } memcpy(data, value, length); } *datap = data; *sizep = length; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Read a single TLV item from the first segment in a TLV formatted partition */ __checkReturn efx_rc_t ef10_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap, __out size_t *seg_sizep) { caddr_t seg_data = NULL; size_t partn_size = 0; size_t length; caddr_t data; int retry; efx_rc_t rc; /* Allocate sufficient memory for the entire partition */ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; if (partn_size == 0) { rc = ENOENT; goto fail2; } EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data); if (seg_data == NULL) { rc = ENOMEM; goto fail3; } /* * Read the first segment in a TLV partition. Retry until consistent * segment contents are returned. Inconsistent data may be read if: * a) the segment contents are invalid * b) the MC has rebooted while we were reading the partition * c) the partition has been modified while we were reading it * Limit retry attempts to ensure forward progress. */ retry = 10; do { rc = ef10_nvram_read_tlv_segment(enp, partn, 0, seg_data, partn_size); } while ((rc == EAGAIN) && (--retry > 0)); if (rc != 0) { /* Failed to obtain consistent segment data */ goto fail4; } if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size, tag, &data, &length)) != 0) goto fail5; EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); *seg_datap = data; *seg_sizep = length; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Compute the size of a segment. 
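 * The size returned is the header's total_length field, cross-checked
 * below against the trailer/END tag positions, the zero dword-sum
 * checksum and the measured TLV chain length, so a corrupt segment is
 * rejected rather than silently mis-sized.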
 */
static	__checkReturn		efx_rc_t
ef10_nvram_buf_segment_size(
	__in			caddr_t seg_data,
	__in			size_t max_seg_size,
	__out			size_t *seg_sizep)
{
	efx_rc_t rc;
	tlv_cursor_t cursor;
	struct tlv_partition_header *header;
	uint32_t cksum;
	int pos;
	uint32_t *end_tag_position;
	uint32_t segment_length;

	/* A PARTITION_HEADER tag must be the first item at the given offset */
	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
		    max_seg_size)) != 0) {
		rc = EFAULT;
		goto fail1;
	}
	if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
		rc = EINVAL;
		goto fail2;
	}
	header = (struct tlv_partition_header *)tlv_item(&cursor);

	/* Check TLV segment length (includes the END tag) */
	*seg_sizep = __LE_TO_CPU_32(header->total_length);
	if (*seg_sizep > max_seg_size) {
		rc = EFBIG;
		goto fail3;
	}

	/* Check segment ends with PARTITION_TRAILER and END tags */
	if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
		rc = EINVAL;
		goto fail4;
	}
	if ((rc = tlv_advance(&cursor)) != 0) {
		rc = EINVAL;
		goto fail5;
	}
	if (tlv_tag(&cursor) != TLV_TAG_END) {
		rc = EINVAL;
		goto fail6;
	}
	end_tag_position = cursor.current;

	/* Verify segment checksum */
	cksum = 0;
	for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) {
		cksum += *((uint32_t *)(seg_data + pos));
	}
	if (cksum != 0) {
		rc = EINVAL;
		goto fail7;
	}

	/*
	 * Calculate total length from HEADER to END tags and compare to
	 * max_seg_size and the total_length field in the HEADER tag.
	 */
	segment_length = tlv_block_length_used(&cursor);
	if (segment_length > max_seg_size) {
		rc = EINVAL;
		goto fail8;
	}
	if (segment_length != *seg_sizep) {
		rc = EINVAL;
		goto fail9;
	}

	/* Skip over the first HEADER tag. */
	(void) tlv_rewind(&cursor);
	rc = tlv_advance(&cursor);

	while (rc == 0) {
		if (tlv_tag(&cursor) == TLV_TAG_END) {
			/* Check that the END tag is the one found earlier. */
			if (cursor.current != end_tag_position) {
				rc = EINVAL;
				goto fail10;
			}
			break;
		}
		/* Check for duplicate HEADER tags before the END tag. */
		if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
			rc = EINVAL;
			goto fail11;
		}
		rc = tlv_advance(&cursor);
	}
	if (rc != 0)
		goto fail12;

	return (0);

fail12:
	EFSYS_PROBE(fail12);
fail11:
	EFSYS_PROBE(fail11);
fail10:
	EFSYS_PROBE(fail10);
fail9:
	EFSYS_PROBE(fail9);
fail8:
	EFSYS_PROBE(fail8);
fail7:
	EFSYS_PROBE(fail7);
fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * Add or update a single TLV item in a host memory buffer containing a TLV
 * formatted segment. Historically partitions consisted of only one segment.
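 *
 * Note on the checksum recomputed at the end of this function: storing
 * ~cksum + 1 (the two's complement, i.e. -cksum) in the trailer makes the
 * dword sum over the whole segment zero, which is exactly what the
 * validation paths above check for.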
*/ __checkReturn efx_rc_t ef10_nvram_buf_write_tlv( __inout_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; uint32_t generation; uint32_t cksum; int pos; efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Update the TLV chain to contain the new data */ if ((rc = tlv_find(&cursor, tag)) == 0) { /* Modify existing TLV item */ if ((rc = tlv_modify(&cursor, tag, (uint8_t *)tag_data, tag_size)) != 0) goto fail3; } else { /* Insert a new TLV item before the PARTITION_TRAILER */ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER); if (rc != 0) { rc = EINVAL; goto fail4; } if ((rc = tlv_insert(&cursor, tag, (uint8_t *)tag_data, tag_size)) != 0) { rc = EINVAL; goto fail5; } } /* Find the trailer tag */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail6; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); /* Update PARTITION_HEADER and PARTITION_TRAILER fields */ *total_lengthp = tlv_block_length_used(&cursor); if (*total_lengthp > max_seg_size) { rc = ENOSPC; goto fail7; } generation = __LE_TO_CPU_32(header->generation) + 1; header->total_length = __CPU_TO_LE_32(*total_lengthp); header->generation = __CPU_TO_LE_32(generation); trailer->generation = __CPU_TO_LE_32(generation); /* Recompute PARTITION_TRAILER checksum */ trailer->checksum = 0; cksum = 0; for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } trailer->checksum = ~cksum + 1; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in the first segment of a TLV formatted * dynamic config partition. The first segment is the current active * configuration. */ __checkReturn efx_rc_t ef10_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size) { return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data, size, B_FALSE); } /* * Read a segment from nvram at the given offset into a buffer (segment_data) * and optionally write a new tag to it. */ static __checkReturn efx_rc_t ef10_nvram_segment_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __inout caddr_t *seg_datap, __inout size_t *partn_offsetp, __inout size_t *src_remain_lenp, __inout size_t *dest_remain_lenp, __in boolean_t write) { efx_rc_t rc; efx_rc_t status; size_t original_segment_size; size_t modified_segment_size; /* * Read the segment from NVRAM into the segment_data buffer and validate * it, returning if it does not validate. This is not a failure unless * this is the first segment in a partition. In this case the caller * must propagate the error. 
 */
	status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp,
	    *seg_datap, *src_remain_lenp);
	if (status != 0) {
		rc = EINVAL;
		goto fail1;
	}

	status = ef10_nvram_buf_segment_size(*seg_datap,
	    *src_remain_lenp, &original_segment_size);
	if (status != 0) {
		rc = EINVAL;
		goto fail2;
	}

	if (write) {
		/* Update the contents of the segment in the buffer */
		if ((rc = ef10_nvram_buf_write_tlv(*seg_datap,
			    *dest_remain_lenp, tag, data, size,
			    &modified_segment_size)) != 0) {
			goto fail3;
		}
		*dest_remain_lenp -= modified_segment_size;
		*seg_datap += modified_segment_size;
	} else {
		/*
		 * We won't modify this segment, but still need to update the
		 * remaining lengths and pointers.
		 */
		*dest_remain_lenp -= original_segment_size;
		*seg_datap += original_segment_size;
	}

	*partn_offsetp += original_segment_size;
	*src_remain_lenp -= original_segment_size;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * Add or update a single TLV item in either the first segment or in all
 * segments in a TLV formatted dynamic config partition. Dynamic config
 * partitions on boards that support RFID are divided into a number of segments,
 * each formatted like a partition, with header, trailer and end tags. The first
 * segment is the current active configuration.
 *
 * The segments are initialised by manftest and each contain a different
 * configuration e.g. firmware variant. The firmware can be instructed
 * via RFID to copy a segment to replace the first segment, hence changing the
 * active configuration. This allows ops to change the configuration of a board
 * prior to shipment using RFID.
 *
 * Changes to the dynamic config may need to be written to all segments (e.g.
 * firmware versions) or just the first segment (changes to the active
 * configuration). See SF-111324-SW "The use of RFID in Solarflare Products".
 * If only the first segment is written the code still needs to be aware of the
 * possible presence of subsequent segments as writing to a segment may cause
 * its size to increase, which would overwrite the subsequent segments and
 * invalidate them.
 */
	__checkReturn		efx_rc_t
ef10_nvram_partn_write_segment_tlv(
	__in			efx_nic_t *enp,
	__in			uint32_t partn,
	__in			uint32_t tag,
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__in			boolean_t all_segments)
{
	size_t partn_size = 0;
	caddr_t partn_data;
	size_t total_length = 0;
	efx_rc_t rc;
	size_t current_offset = 0;
	size_t remaining_original_length;
	size_t remaining_modified_length;
	caddr_t segment_data;

	EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG);

	/* Allocate sufficient memory for the entire partition */
	if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
		goto fail1;

	EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data);
	if (partn_data == NULL) {
		rc = ENOMEM;
		goto fail2;
	}

	remaining_original_length = partn_size;
	remaining_modified_length = partn_size;
	segment_data = partn_data;

	/* Lock the partition */
	if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
		goto fail3;

	/* Iterate over each (potential) segment to update it. */
	do {
		boolean_t write = all_segments || current_offset == 0;

		rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size,
		    &segment_data, &current_offset, &remaining_original_length,
		    &remaining_modified_length, write);
		if (rc != 0) {
			if (current_offset == 0) {
				/*
				 * If no data has been read then the first
				 * segment is invalid, which is an error.
*/ goto fail4; } break; } } while (current_offset < partn_size); total_length = segment_data - partn_data; /* * We've run out of space. This should actually be dealt with by * ef10_nvram_buf_write_tlv returning ENOSPC. */ if (total_length > partn_size) { rc = ENOSPC; goto fail5; } /* Erase the whole partition in NVRAM */ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0) goto fail6; /* Write new partition contents from the buffer to NVRAM */ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data, total_length)) != 0) goto fail7; /* Unlock the partition */ - ef10_nvram_partn_unlock(enp, partn, NULL); + (void) ef10_nvram_partn_unlock(enp, partn, NULL); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); - ef10_nvram_partn_unlock(enp, partn, NULL); + (void) ef10_nvram_partn_unlock(enp, partn, NULL); fail3: EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Get the size of a NVRAM partition. This is the total size allocated in nvram, * not the data used by the segments in the partition. */ __checkReturn efx_rc_t ef10_nvram_partn_size( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *sizep) { efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, sizep, NULL, NULL, NULL)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_lock( __in efx_nic_t *enp, __in uint32_t partn) { efx_rc_t rc; if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_read_mode( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size, __in uint32_t mode) { size_t chunk; efx_rc_t rc; while (size > 0) { chunk = MIN(size, EF10_NVRAM_CHUNK); if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk, mode)) != 0) { goto fail1; } size -= chunk; data += chunk; offset += chunk; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_read( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { /* * Read requests which come in through the EFX API expect to * read the current, active partition. 
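 * The MCDI read-mode argument allows other NVRAM_READ targets to be
 * selected, which is why ef10_nvram_partn_read_mode() takes it
 * explicitly; this wrapper always passes
 * MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT below.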
*/ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT); } __checkReturn efx_rc_t ef10_nvram_partn_erase( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __in size_t size) { efx_rc_t rc; uint32_t erase_size; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, &erase_size, NULL)) != 0) goto fail1; if (erase_size == 0) { if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) goto fail2; } else { if (size % erase_size != 0) { rc = EINVAL; goto fail3; } while (size > 0) { if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, erase_size)) != 0) goto fail4; offset += erase_size; size -= erase_size; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_write( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { size_t chunk; uint32_t write_size; efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, NULL, &write_size)) != 0) goto fail1; if (write_size != 0) { /* * Check that the size is a multiple of the write chunk size if * the write chunk size is available. */ if (size % write_size != 0) { rc = EINVAL; goto fail2; } } else { write_size = EF10_NVRAM_CHUNK; } while (size > 0) { chunk = MIN(size, write_size); if ((rc = efx_mcdi_nvram_write(enp, partn, offset, data, chunk)) != 0) { goto fail3; } size -= chunk; data += chunk; offset += chunk; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_unlock( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *resultp) { boolean_t reboot = B_FALSE; efx_rc_t rc; if (resultp != NULL) *resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN; rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, resultp); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_set_version( __in efx_nic_t *enp, __in uint32_t partn, __in_ecount(4) uint16_t version[4]) { struct tlv_partition_version partn_version; size_t size; efx_rc_t rc; /* Add or modify partition version TLV item */ partn_version.version_w = __CPU_TO_LE_16(version[0]); partn_version.version_x = __CPU_TO_LE_16(version[1]); partn_version.version_y = __CPU_TO_LE_16(version[2]); partn_version.version_z = __CPU_TO_LE_16(version[3]); size = sizeof (partn_version) - (2 * sizeof (uint32_t)); /* Write the version number to all segments in the partition */ if ((rc = ef10_nvram_partn_write_segment_tlv(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, TLV_TAG_PARTITION_VERSION(partn), (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM typedef struct ef10_parttbl_entry_s { unsigned int partn; unsigned int port; efx_nvram_type_t nvtype; } ef10_parttbl_entry_t; /* Translate EFX NVRAM types to firmware partition types */ static ef10_parttbl_entry_t hunt_parttbl[] = { {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, 
{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE} }; static ef10_parttbl_entry_t medford_parttbl[] = { {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP}, {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}, 
{NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE}, {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 1, EFX_NVRAM_UEFIROM}, {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 2, EFX_NVRAM_UEFIROM}, {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 3, EFX_NVRAM_UEFIROM}, {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 4, EFX_NVRAM_UEFIROM} }; static __checkReturn efx_rc_t ef10_parttbl_get( __in efx_nic_t *enp, __out ef10_parttbl_entry_t **parttblp, __out size_t *parttbl_rowsp) { switch (enp->en_family) { case EFX_FAMILY_HUNTINGTON: *parttblp = hunt_parttbl; *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl); break; case EFX_FAMILY_MEDFORD: *parttblp = medford_parttbl; *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl); break; default: EFSYS_ASSERT(B_FALSE); return (EINVAL); } return (0); } __checkReturn efx_rc_t ef10_nvram_type_to_partn( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *partnp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); ef10_parttbl_entry_t *parttbl = NULL; size_t parttbl_rows = 0; unsigned int i; EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT(partnp != NULL); if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { for (i = 0; i < parttbl_rows; i++) { ef10_parttbl_entry_t *entry = &parttbl[i]; if (entry->nvtype == type && entry->port == emip->emi_port) { *partnp = entry->partn; return (0); } } } return (ENOTSUP); } #if EFSYS_OPT_DIAG static __checkReturn efx_rc_t ef10_nvram_partn_to_type( __in efx_nic_t *enp, __in uint32_t partn, __out efx_nvram_type_t *typep) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); ef10_parttbl_entry_t *parttbl = NULL; size_t parttbl_rows = 0; unsigned int i; EFSYS_ASSERT(typep != NULL); if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { for (i = 0; i < parttbl_rows; i++) { ef10_parttbl_entry_t *entry = &parttbl[i]; if (entry->partn == partn && entry->port == emip->emi_port) { *typep = entry->nvtype; return (0); } } } return (ENOTSUP); } __checkReturn efx_rc_t ef10_nvram_test( __in efx_nic_t *enp) { efx_nvram_type_t type; unsigned int npartns = 0; uint32_t *partns = NULL; size_t size; unsigned int i; efx_rc_t rc; /* Read available partitions from NVRAM partition map */ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t); EFSYS_KMEM_ALLOC(enp->en_esip, size, partns); if (partns == NULL) { rc = ENOMEM; goto fail1; } if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size, &npartns)) != 0) { goto fail2; } for (i = 0; i < npartns; i++) { /* Check if the partition is supported for this port */ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0) continue; if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0) goto fail3; } EFSYS_KMEM_FREE(enp->en_esip, size, partns); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, size, partns); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ __checkReturn efx_rc_t ef10_nvram_partn_get_version( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]) { efx_rc_t rc; /* FIXME: get highest partn version from all ports */ /* FIXME: return partn description if available */ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep, version, NULL, 0)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_rw_start( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *chunk_sizep) { efx_rc_t rc; if ((rc = ef10_nvram_partn_lock(enp, partn)) != 
0) goto fail1; if (chunk_sizep != NULL) *chunk_sizep = EF10_NVRAM_CHUNK; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_rw_finish( __in efx_nic_t *enp, __in uint32_t partn) { efx_rc_t rc; if ((rc = ef10_nvram_partn_unlock(enp, partn, NULL)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_NVRAM */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: stable/11/sys/dev/sfxge/common/ef10_tx.c =================================================================== --- stable/11/sys/dev/sfxge/common/ef10_tx.c (revision 342439) +++ stable/11/sys/dev/sfxge/common/ef10_tx.c (revision 342440) @@ -1,796 +1,798 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_QSTATS #define EFX_TX_QSTAT_INCR(_etp, _stat) \ do { \ (_etp)->et_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_TX_QSTAT_INCR(_etp, _stat) #endif static __checkReturn efx_rc_t efx_mcdi_init_txq( __in efx_nic_t *enp, __in uint32_t size, __in uint32_t target_evq, __in uint32_t label, __in uint32_t instance, __in uint16_t flags, __in efsys_mem_t *esmp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS), MC_CMD_INIT_TXQ_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; efx_rc_t rc; EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >= EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs)); npages = EFX_TXQ_NBUFS(size); if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_TXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS, INIT_TXQ_IN_FLAG_BUFF_MODE, 0, INIT_TXQ_IN_FLAG_IP_CSUM_DIS, (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1, INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1, INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0, INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0, INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, INIT_TXQ_IN_CRC_MODE, 0, INIT_TXQ_IN_FLAG_TIMESTAMP, 0); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fini_txq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN, MC_CMD_FINI_TXQ_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_TXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the TXQ has already been destroyed. 
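 * This is also why efx_mcdi_execute_quiet() is used above: an EALREADY
 * response expected after an MC reboot should not be reported as an
 * MCDI error.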
*/ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_tx_init( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) return (0); } void ef10_tx_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } __checkReturn efx_rc_t ef10_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; uint16_t inner_csum; efx_desc_t desc; efx_rc_t rc; _NOTE(ARGUNUSED(id)) inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP; if (((flags & inner_csum) != 0) && (encp->enc_tunnel_encapsulations_supported == 0)) { rc = EINVAL; goto fail1; } if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags, esmp)) != 0) goto fail2; /* * A previous user of this TX queue may have written a descriptor to the * TX push collector, but not pushed the doorbell (e.g. after a crash). * The next doorbell write would then push the stale descriptor. * * Ensure the (per network port) TX push collector is cleared by writing * a no-op TX option descriptor. See bug29981 for details. */ *addedp = 1; ef10_tx_qdesc_checksum_create(etp, flags, &desc); EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq); ef10_tx_qpush(etp, *addedp, 0); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_tx_qdestroy( __in efx_txq_t *etp) { /* FIXME */ _NOTE(ARGUNUSED(etp)) /* FIXME */ } __checkReturn efx_rc_t ef10_tx_qpio_enable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_piobuf_handle_t handle; efx_rc_t rc; if (etp->et_pio_size != 0) { rc = EALREADY; goto fail1; } /* Sub-allocate a PIO block from a piobuf */ if ((rc = ef10_nic_pio_alloc(enp, &etp->et_pio_bufnum, &handle, &etp->et_pio_blknum, &etp->et_pio_offset, &etp->et_pio_size)) != 0) { goto fail2; } EFSYS_ASSERT3U(etp->et_pio_size, !=, 0); /* Link the piobuf to this TXQ */ if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) { goto fail3; } /* * et_pio_offset is the offset of the sub-allocated block within the * hardware PIO buffer. It is used as the buffer address in the PIO * option descriptor. * * et_pio_write_offset is the offset of the sub-allocated block from the * start of the write-combined memory mapping, and is used for writing * data into the PIO buffer. 
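 *
 * i.e. the assignment below computes:
 *
 *	et_pio_write_offset = et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP +
 *	    ER_DZ_TX_PIOBUF_OFST + et_pio_offset
 *
 * whereas the PIO option descriptor built in ef10_tx_qpio_post() uses
 * et_pio_offset alone, relative to the hardware PIO buffer.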
*/ etp->et_pio_write_offset = (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) + ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset; return (0); fail3: EFSYS_PROBE(fail3); - ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); + (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); fail2: EFSYS_PROBE(fail2); etp->et_pio_size = 0; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_tx_qpio_disable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; if (etp->et_pio_size != 0) { /* Unlink the piobuf from this TXQ */ - ef10_nic_pio_unlink(enp, etp->et_index); + if (ef10_nic_pio_unlink(enp, etp->et_index) != 0) + return; /* Free the sub-allocated PIO block */ - ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); + (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, + etp->et_pio_blknum); etp->et_pio_size = 0; etp->et_pio_write_offset = 0; } } __checkReturn efx_rc_t ef10_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(length) uint8_t *buffer, __in size_t length, __in size_t offset) { efx_nic_t *enp = etp->et_enp; efsys_bar_t *esbp = enp->en_esbp; uint32_t write_offset; uint32_t write_offset_limit; efx_qword_t *eqp; efx_rc_t rc; EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0); if (etp->et_pio_size == 0) { rc = ENOENT; goto fail1; } if (offset + length > etp->et_pio_size) { rc = ENOSPC; goto fail2; } /* * Writes to PIO buffers must be 64 bit aligned, and multiples of * 64 bits. */ write_offset = etp->et_pio_write_offset + offset; write_offset_limit = write_offset + length; eqp = (efx_qword_t *)buffer; while (write_offset < write_offset_limit) { EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp); eqp++; write_offset += sizeof (efx_qword_t); } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp) { efx_qword_t pio_desc; unsigned int id; size_t offset; unsigned int added = *addedp; efx_rc_t rc; if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } if (etp->et_pio_size == 0) { rc = ENOENT; goto fail2; } id = added++ & etp->et_mask; offset = id * sizeof (efx_qword_t); EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index, unsigned int, id, uint32_t, etp->et_pio_offset, size_t, pkt_length); EFX_POPULATE_QWORD_5(pio_desc, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, 1, ESF_DZ_TX_PIO_CONT, 0, ESF_DZ_TX_PIO_BYTE_CNT, pkt_length, ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc); EFX_TX_QSTAT_INCR(etp, TX_POST_PIO); *addedp = added; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; efx_rc_t rc; if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } for (i = 0; i < n; i++) { efx_buffer_t *ebp = &eb[i]; efsys_dma_addr_t addr = ebp->eb_addr; size_t size = ebp->eb_size; boolean_t eop = ebp->eb_eop; unsigned int id; size_t offset; efx_qword_t qword; /* No limitations on boundary crossing */ EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max); id = added++ & etp->et_mask; offset = id * sizeof (efx_qword_t); EFSYS_PROBE5(tx_post, unsigned int, etp->et_index, unsigned int, id, efsys_dma_addr_t, 
	    addr, size_t, size, boolean_t, eop);

		EFX_POPULATE_QWORD_5(qword,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * This improves performance by, when possible, pushing a TX descriptor at the
 * same time as the doorbell. The descriptor must be added to the TXQ, so that
 * it can be used if the hardware decides not to use the pushed descriptor.
 */
			void
ef10_tx_qpush(
	__in		efx_txq_t *etp,
	__in		unsigned int added,
	__in		unsigned int pushed)
{
	efx_nic_t *enp = etp->et_enp;
	unsigned int wptr;
	unsigned int id;
	size_t offset;
	efx_qword_t desc;
	efx_oword_t oword;

	wptr = added & etp->et_mask;
	id = pushed & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);

	/*
	 * SF Bug 65776: TSO option descriptors cannot be pushed if pacer bypass
	 * is enabled on the event queue this transmit queue is attached to.
	 *
	 * To ensure the code is safe, it is easiest to simply test the type of
	 * the descriptor to push, and only push it if it is not a TSO option
	 * descriptor.
	 */
	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
	    ESE_DZ_TX_OPTION_DESC_TSO)) {
		/* Push the descriptor and update the wptr. */
		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &oword);
	} else {
		efx_dword_t dword;

		/*
		 * Only update the wptr. This is signalled to the hardware by
		 * only writing one DWORD of the doorbell register.
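		 * The DWORD extracted below (eo_dword[2]) is the part of the
		 * 128-bit register holding ERF_DZ_TX_DESC_WPTR, so the
		 * pushed-descriptor words are left untouched.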
*/ EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr); dword = oword.eo_dword[2]; /* Ensure ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index, &dword, B_FALSE); } } __checkReturn efx_rc_t ef10_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; efx_rc_t rc; if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } for (i = 0; i < n; i++) { efx_desc_t *edp = &ed[i]; unsigned int id; size_t offset; id = added++ & etp->et_mask; offset = id * sizeof (efx_desc_t); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq); } EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index, unsigned int, added, unsigned int, n); EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp) { _NOTE(ARGUNUSED(etp)) /* No limitations on boundary crossing */ EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max); EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, efsys_dma_addr_t, addr, size_t, size, boolean_t, eop); EFX_POPULATE_QWORD_5(edp->ed_eq, ESF_DZ_TX_KER_TYPE, 0, ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1, ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size), ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); } void ef10_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp) { _NOTE(ARGUNUSED(etp)) EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index, uint16_t, ipv4_id, uint32_t, tcp_seq, uint8_t, tcp_flags); EFX_POPULATE_QWORD_5(edp->ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, ESF_DZ_TX_TSO_IP_ID, ipv4_id, ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); } void ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, __in int count) { _NOTE(ARGUNUSED(etp, count)) EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index, uint16_t, ipv4_id, uint32_t, tcp_seq, uint16_t, tcp_mss); EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS); EFX_POPULATE_QWORD_5(edp[0].ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_OPTION_TYPE, ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, ESF_DZ_TX_TSO_IP_ID, ipv4_id, ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); EFX_POPULATE_QWORD_4(edp[1].ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_OPTION_TYPE, ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, ESF_DZ_TX_TSO_TCP_MSS, tcp_mss); } void ef10_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t tci, __out efx_desc_t *edp) { _NOTE(ARGUNUSED(etp)) EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index, uint16_t, tci); EFX_POPULATE_QWORD_4(edp->ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_VLAN, ESF_DZ_TX_VLAN_OP, tci ? 
1 : 0, ESF_DZ_TX_VLAN_TAG1, tci); } void ef10_tx_qdesc_checksum_create( __in efx_txq_t *etp, __in uint16_t flags, __out efx_desc_t *edp) { _NOTE(ARGUNUSED(etp)); EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index, uint32_t, flags); EFX_POPULATE_QWORD_6(edp->ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, ESF_DZ_TX_OPTION_UDP_TCP_CSUM, (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0, ESF_DZ_TX_OPTION_IP_CSUM, (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0, ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, ESF_DZ_TX_OPTION_INNER_IP_CSUM, (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0); } __checkReturn efx_rc_t ef10_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns) { efx_rc_t rc; /* FIXME */ _NOTE(ARGUNUSED(etp, ns)) _NOTE(CONSTANTCONDITION) if (B_FALSE) { rc = ENOTSUP; goto fail1; } /* FIXME */ return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_tx_qflush( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_rc_t rc; if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0) goto fail1; return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the TXQ has already been destroyed. Callers need to know that * the TXQ flush has completed to avoid waiting until timeout for a * flush done event that will not be delivered. */ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_tx_qenable( __in efx_txq_t *etp) { /* FIXME */ _NOTE(ARGUNUSED(etp)) /* FIXME */ } #if EFSYS_OPT_QSTATS void ef10_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < TX_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, etp->et_stat[id]); etp->et_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
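[Editor's sketch -- illustrative only, not part of this changeset.] The ef10_tx.c entry points above follow a post-then-push pattern: ef10_tx_qpost()/ef10_tx_qdesc_post() write descriptors into host memory, and ef10_tx_qpush() rings the doorbell, coalescing the descriptor write with it where the hardware allows. A minimal hypothetical caller is sketched below; the name example_tx_submit and the frags/nfrags/completed/addedp caller state are assumptions, not code from this tree, and it presumes the usual efx.h/efx_impl.h environment.

/*
 * Hypothetical caller sketch: post a fragment list, then ring the
 * doorbell, remembering the first newly added descriptor so the push
 * path can offer it for coalescing with the doorbell write.
 */
static	__checkReturn	efx_rc_t
example_tx_submit(
	__in			efx_txq_t *etp,
	__in_ecount(nfrags)	efx_buffer_t *frags,
	__in			unsigned int nfrags,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int pushed = *addedp;	/* descriptor offered for push */
	efx_rc_t rc;

	if ((rc = ef10_tx_qpost(etp, frags, nfrags, completed,
	    addedp)) != 0)
		return (rc);	/* ENOSPC: ring full, caller must retry */

	/* Doorbell; may push the descriptor at "pushed" along with it. */
	ef10_tx_qpush(etp, *addedp, pushed);

	return (0);
}

On teardown, note that ef10_tx_qflush() treats EALREADY as "already destroyed by MC reboot": a caller should stop waiting for the flush-done event in that case rather than reporting a failure.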
Index: stable/11/sys/dev/sfxge/common/efx_port.c =================================================================== --- stable/11/sys/dev/sfxge/common/efx_port.c (revision 342439) +++ stable/11/sys/dev/sfxge/common/efx_port.c (revision 342440) @@ -1,254 +1,254 @@ /*- * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" __checkReturn efx_rc_t efx_port_init( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); const efx_phy_ops_t *epop = epp->ep_epop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (enp->en_mod_flags & EFX_MOD_PORT) { rc = EINVAL; goto fail1; } enp->en_mod_flags |= EFX_MOD_PORT; epp->ep_mac_type = EFX_MAC_INVALID; epp->ep_link_mode = EFX_LINK_UNKNOWN; epp->ep_mac_drain = B_TRUE; /* Configure the MAC */ if ((rc = efx_mac_select(enp)) != 0) goto fail1; epp->ep_emop->emo_reconfigure(enp); /* Pick up current phy capabilities */ - efx_port_poll(enp, NULL); + (void) efx_port_poll(enp, NULL); /* * Turn on the PHY if available, otherwise reset it, and * reconfigure it with the current configuration. */ if (epop->epo_power != NULL) { if ((rc = epop->epo_power(enp, B_TRUE)) != 0) goto fail2; } else { if ((rc = epop->epo_reset(enp)) != 0) goto fail2; } EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY); enp->en_reset_flags &= ~EFX_RESET_PHY; if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_mod_flags &= ~EFX_MOD_PORT; return (rc); } __checkReturn efx_rc_t efx_port_poll( __in efx_nic_t *enp, __out_opt efx_link_mode_t *link_modep) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_link_mode_t ignore_link_mode; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); if (link_modep == NULL) link_modep = &ignore_link_mode; if ((rc = emop->emo_poll(enp, link_modep)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_LOOPBACK __checkReturn efx_rc_t efx_port_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type) { efx_port_t *epp = &(enp->en_port); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); EFSYS_ASSERT(link_mode < EFX_LINK_NMODES); if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode], (int)loopback_type) == 0) { rc = ENOTSUP; goto fail1; } if (epp->ep_loopback_type == loopback_type && epp->ep_loopback_link_mode == link_mode) return (0); if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_NAMES static const char * const __efx_loopback_type_name[] = { "OFF", "DATA", "GMAC", "XGMII", "XGXS", "XAUI", "GMII", "SGMII", "XGBR", "XFI", "XAUI_FAR", "GMII_FAR", "SGMII_FAR", "XFI_FAR", "GPHY", "PHY_XS", "PCS", "PMA_PMD", "XPORT", "XGMII_WS", "XAUI_WS", "XAUI_WS_FAR", "XAUI_WS_NEAR", "GMII_WS", "XFI_WS", "XFI_WS_FAR", "PHYXS_WS", "PMA_INT", "SD_NEAR", "SD_FAR", "PMA_INT_WS", "SD_FEP2_WS", "SD_FEP1_5_WS", "SD_FEP_WS", "SD_FES_WS", }; __checkReturn const char * efx_loopback_type_name( __in efx_nic_t *enp, __in efx_loopback_type_t type) {
EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) == EFX_LOOPBACK_NTYPES); _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES); return (__efx_loopback_type_name[type]); } #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_LOOPBACK */ void efx_port_fini( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); const efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(epp->ep_mac_drain); epp->ep_emop = NULL; epp->ep_mac_type = EFX_MAC_INVALID; epp->ep_mac_drain = B_FALSE; /* Turn off the PHY */ if (epop->epo_power != NULL) (void) epop->epo_power(enp, B_FALSE); enp->en_mod_flags &= ~EFX_MOD_PORT; } Index: stable/11 =================================================================== --- stable/11 (revision 342439) +++ stable/11 (revision 342440) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r341213
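[Editor's sketch -- illustrative only, not part of this changeset.] The recurring theme of this merge is that __checkReturn results are either acted on or explicitly discarded with a (void) cast, as in the "(void) efx_port_poll(enp, NULL)" hunk above. A minimal hypothetical caller illustrating both sides of that convention follows; example_port_refresh and the example_link_mode probe name are assumptions, not code from this tree.

/*
 * Hypothetical caller sketch: poll the port, acting on the link mode
 * when the caller needs it and explicitly discarding the result when
 * only the poll side effect matters.
 */
static void
example_port_refresh(
	__in		efx_nic_t *enp)
{
	efx_link_mode_t link_mode;

	/* Result handled: fall back to "unknown" if the poll fails. */
	if (efx_port_poll(enp, &link_mode) != 0)
		link_mode = EFX_LINK_UNKNOWN;

	/*
	 * Result intentionally discarded: a best-effort refresh where
	 * the previously cached state remains acceptable on failure,
	 * matching the (void) cast usage in efx_port_init().
	 */
	(void) efx_port_poll(enp, NULL);

	EFSYS_PROBE1(example_link_mode, efx_link_mode_t, link_mode);
}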