Index: stable/10/sys/dev/sfxge/common/ef10_filter.c =================================================================== --- stable/10/sys/dev/sfxge/common/ef10_filter.c (revision 342486) +++ stable/10/sys/dev/sfxge/common/ef10_filter.c (revision 342487) @@ -1,1541 +1,1660 @@ /*- * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_FILTER #define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec) static efx_filter_spec_t * ef10_filter_entry_spec( __in const ef10_filter_table_t *eftp, __in unsigned int index) { return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) & ~(uintptr_t)EFX_EF10_FILTER_FLAGS)); } static boolean_t ef10_filter_entry_is_busy( __in const ef10_filter_table_t *eftp, __in unsigned int index) { if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY) return (B_TRUE); else return (B_FALSE); } static boolean_t ef10_filter_entry_is_auto_old( __in const ef10_filter_table_t *eftp, __in unsigned int index) { if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD) return (B_TRUE); else return (B_FALSE); } static void ef10_filter_set_entry( __inout ef10_filter_table_t *eftp, __in unsigned int index, __in_opt const efx_filter_spec_t *efsp) { EFE_SPEC(eftp, index) = (uintptr_t)efsp; } static void ef10_filter_set_entry_busy( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY; } static void ef10_filter_set_entry_not_busy( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY; } static void ef10_filter_set_entry_auto_old( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL); EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD; } static void ef10_filter_set_entry_not_auto_old( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD; EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL); } __checkReturn efx_rc_t ef10_filter_init( __in efx_nic_t *enp) { efx_rc_t rc; ef10_filter_table_t *eftp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); #define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match)) EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST)); EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST ==
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST)); #undef MATCH_MASK EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp); if (!eftp) { rc = ENOMEM; goto fail1; } enp->en_filter.ef_ef10_filter_table = eftp; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_filter_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); if (enp->en_filter.ef_ef10_filter_table != NULL) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t), enp->en_filter.ef_ef10_filter_table); } } static __checkReturn efx_rc_t efx_mcdi_filter_op_add( __in efx_nic_t *enp, __in efx_filter_spec_t *spec, __in unsigned int filter_op, __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN, MC_CMD_FILTER_OP_EXT_OUT_LEN)]; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REPLACE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi); /* Fall through */ case MC_CMD_FILTER_OP_IN_OP_INSERT: case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, filter_op); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS, spec->efs_match_flags); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST, MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE, spec->efs_dmaq_id); if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) { MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_CONTEXT, spec->efs_rss_context); } MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_MODE, spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ? MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS : MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_TX_DEST, MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT); if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) { /* * NOTE: Unlike most MCDI requests, the filter fields * are presented in network (big endian) byte order. 
*/ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_MAC), spec->efs_rem_mac, EFX_MAC_ADDR_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_MAC), spec->efs_loc_mac, EFX_MAC_ADDR_LEN); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_SRC_PORT, __CPU_TO_BE_16(spec->efs_rem_port)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_DST_PORT, __CPU_TO_BE_16(spec->efs_loc_port)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_ETHER_TYPE, __CPU_TO_BE_16(spec->efs_ether_type)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_INNER_VLAN, __CPU_TO_BE_16(spec->efs_inner_vid)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_OUTER_VLAN, __CPU_TO_BE_16(spec->efs_outer_vid)); /* IP protocol (in low byte, high byte is zero) */ MCDI_IN_SET_BYTE(req, FILTER_OP_EXT_IN_IP_PROTO, spec->efs_ip_proto); EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) == MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN); EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) == MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_IP), &spec->efs_rem_host.eo_byte[0], MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_IP), &spec->efs_loc_host.eo_byte[0], MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN); /* * On Medford, filters for encapsulated packets match based on * the ether type and IP protocol in the outer frame. In * addition we need to fill in the VNI or VSID type field. */ switch (spec->efs_encap_type) { case EFX_TUNNEL_PROTOCOL_NONE: break; case EFX_TUNNEL_PROTOCOL_VXLAN: case EFX_TUNNEL_PROTOCOL_GENEVE: MCDI_IN_POPULATE_DWORD_1(req, FILTER_OP_EXT_IN_VNI_OR_VSID, FILTER_OP_EXT_IN_VNI_TYPE, spec->efs_encap_type == EFX_TUNNEL_PROTOCOL_VXLAN ? MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN : MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE); break; case EFX_TUNNEL_PROTOCOL_NVGRE: MCDI_IN_POPULATE_DWORD_1(req, FILTER_OP_EXT_IN_VNI_OR_VSID, FILTER_OP_EXT_IN_VSID_TYPE, MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail2; } } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { rc = EMSGSIZE; goto fail4; } handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO); handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_HI); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_filter_op_delete( __in efx_nic_t *enp, __in unsigned int filter_op, __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN, MC_CMD_FILTER_OP_EXT_OUT_LEN)]; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REMOVE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, MC_CMD_FILTER_OP_IN_OP_REMOVE); break; case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { rc = EMSGSIZE; goto fail3; } return (0); fail3: 
EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn boolean_t ef10_filter_equal( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { /* FIXME: Consider rx vs tx filters (look at efs_flags) */ if (left->efs_match_flags != right->efs_match_flags) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host)) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host)) return (B_FALSE); if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (left->efs_rem_port != right->efs_rem_port) return (B_FALSE); if (left->efs_loc_port != right->efs_loc_port) return (B_FALSE); if (left->efs_inner_vid != right->efs_inner_vid) return (B_FALSE); if (left->efs_outer_vid != right->efs_outer_vid) return (B_FALSE); if (left->efs_ether_type != right->efs_ether_type) return (B_FALSE); if (left->efs_ip_proto != right->efs_ip_proto) return (B_FALSE); if (left->efs_encap_type != right->efs_encap_type) return (B_FALSE); return (B_TRUE); } static __checkReturn boolean_t ef10_filter_same_dest( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) && (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_rss_context == right->efs_rss_context) return (B_TRUE); } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) && (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_dmaq_id == right->efs_dmaq_id) return (B_TRUE); } return (B_FALSE); } static __checkReturn uint32_t ef10_filter_hash( __in efx_filter_spec_t *spec) { EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t)) == 0); EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) % sizeof (uint32_t)) == 0); /* * As the area of the efx_filter_spec_t we need to hash is DWORD * aligned and an exact number of DWORDs in size we can use the * optimised efx_hash_dwords() rather than efx_hash_bytes() */ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid, (sizeof (efx_filter_spec_t) - EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) / sizeof (uint32_t), 0)); } /* * Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. Currently we decide that * filters for specific local unicast MAC and IP addresses are * exclusive. 
*/ static __checkReturn boolean_t ef10_filter_is_exclusive( __in efx_filter_spec_t *spec) { if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) && !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac)) return (B_TRUE); if ((spec->efs_match_flags & (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) && ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe)) return (B_TRUE); if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) && (spec->efs_loc_host.eo_u8[0] != 0xff)) return (B_TRUE); } return (B_FALSE); } __checkReturn efx_rc_t ef10_filter_restore( __in efx_nic_t *enp) { int tbl_id; efx_filter_spec_t *spec; ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; boolean_t restoring; efsys_lock_state_t state; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) { EFSYS_LOCK(enp->en_eslp, state); spec = ef10_filter_entry_spec(eftp, tbl_id); if (spec == NULL) { restoring = B_FALSE; } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) { /* Ignore busy entries. */ restoring = B_FALSE; } else { ef10_filter_set_entry_busy(eftp, tbl_id); restoring = B_TRUE; } EFSYS_UNLOCK(enp->en_eslp, state); if (restoring == B_FALSE) continue; if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &eftp->eft_entry[tbl_id].efe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &eftp->eft_entry[tbl_id].efe_handle); } if (rc != 0) goto fail1; EFSYS_LOCK(enp->en_eslp, state); ef10_filter_set_entry_not_busy(eftp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * An arbitrary search limit for the software hash table. As per the linux net * driver. */ #define EF10_FILTER_SEARCH_LIMIT 200 static __checkReturn efx_rc_t ef10_filter_add_internal( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace, __out_opt uint32_t *filter_id) { efx_rc_t rc; ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *saved_spec; uint32_t hash; unsigned int depth; int ins_index; boolean_t replacing = B_FALSE; unsigned int i; efsys_lock_state_t state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); #if EFSYS_OPT_RX_SCALE spec->efs_rss_context = enp->en_rss_context; #endif hash = ef10_filter_hash(spec); /* * FIXME: Add support for inserting filters of different priorities * and removing lower priority multicast filters (bug 42378) */ /* * Find any existing filters with the same match tuple or * else a free slot to insert at. If any of them are busy, * we have to wait and retry. 
for (;;) { ins_index = -1; depth = 1; EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; for (;;) { i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1); saved_spec = ef10_filter_entry_spec(eftp, i); if (!saved_spec) { if (ins_index < 0) { ins_index = i; } } else if (ef10_filter_equal(spec, saved_spec)) { if (ef10_filter_entry_is_busy(eftp, i)) break; if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) { ins_index = i; goto found; } else if (ef10_filter_is_exclusive(spec)) { if (may_replace) { ins_index = i; goto found; } else { rc = EEXIST; goto fail1; } } /* Leave existing */ } /* * Once we reach the maximum search depth, use * the first suitable slot or return EBUSY if * there was none. */ if (depth == EF10_FILTER_SEARCH_LIMIT) { if (ins_index < 0) { rc = EBUSY; goto fail2; } goto found; } depth++; } EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; } found: /* * Create a software table entry if necessary, and mark it * busy. We might yet fail to insert, but any attempt to * insert a conflicting filter while we're waiting for the * firmware must find the busy entry. */ saved_spec = ef10_filter_entry_spec(eftp, ins_index); if (saved_spec) { if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) { /* This is a filter we are refreshing */ ef10_filter_set_entry_not_auto_old(eftp, ins_index); goto out_unlock; } replacing = B_TRUE; } else { EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec); if (!saved_spec) { rc = ENOMEM; goto fail3; } *saved_spec = *spec; ef10_filter_set_entry(eftp, ins_index, saved_spec); } ef10_filter_set_entry_busy(eftp, ins_index); EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; /* * On replacing, the filter handle may change after a successful * replace operation. */ if (replacing) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_REPLACE, &eftp->eft_entry[ins_index].efe_handle); } else if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &eftp->eft_entry[ins_index].efe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &eftp->eft_entry[ins_index].efe_handle); } if (rc != 0) goto fail4; EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; if (replacing) { /* Update the fields that may differ */ saved_spec->efs_priority = spec->efs_priority; saved_spec->efs_flags = spec->efs_flags; saved_spec->efs_rss_context = spec->efs_rss_context; saved_spec->efs_dmaq_id = spec->efs_dmaq_id; } ef10_filter_set_entry_not_busy(eftp, ins_index); out_unlock: EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; if (filter_id) *filter_id = ins_index; return (0); fail4: EFSYS_PROBE(fail4); if (!replacing) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec); saved_spec = NULL; } ef10_filter_set_entry_not_busy(eftp, ins_index); ef10_filter_set_entry(eftp, ins_index, NULL); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } __checkReturn efx_rc_t ef10_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace) { efx_rc_t rc; rc = ef10_filter_add_internal(enp, spec, may_replace, NULL); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_delete_internal( __in efx_nic_t *enp, __in uint32_t filter_id) { efx_rc_t rc; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *spec; efsys_lock_state_t state; uint32_t
filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS; /* * Find the software table entry and mark it busy. Don't * remove it yet; any attempt to update while we're waiting * for the firmware must find the busy entry. * * FIXME: What if the busy flag is never cleared? */ EFSYS_LOCK(enp->en_eslp, state); while (ef10_filter_entry_is_busy(table, filter_idx)) { EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_SPIN(1); EFSYS_LOCK(enp->en_eslp, state); } if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) { ef10_filter_set_entry_busy(table, filter_idx); } EFSYS_UNLOCK(enp->en_eslp, state); if (spec == NULL) { rc = ENOENT; goto fail1; } /* * Try to remove the hardware filter. This may fail if the MC has * rebooted (which frees all hardware filter resources). */ if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_REMOVE, &table->eft_entry[filter_idx].efe_handle); } else { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE, &table->eft_entry[filter_idx].efe_handle); } /* Free the software table entry */ EFSYS_LOCK(enp->en_eslp, state); ef10_filter_set_entry_not_busy(table, filter_idx); ef10_filter_set_entry(table, filter_idx, NULL); EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec); /* Check result of hardware filter removal */ if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { efx_rc_t rc; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *saved_spec; unsigned int hash; unsigned int depth; unsigned int i; efsys_lock_state_t state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); hash = ef10_filter_hash(spec); EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; depth = 1; for (;;) { i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1); saved_spec = ef10_filter_entry_spec(table, i); if (saved_spec && ef10_filter_equal(spec, saved_spec) && ef10_filter_same_dest(spec, saved_spec)) { break; } if (depth == EF10_FILTER_SEARCH_LIMIT) { rc = ENOENT; goto fail1; } depth++; } EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; rc = ef10_filter_delete_internal(enp, i); if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } static __checkReturn efx_rc_t efx_mcdi_get_parser_disp_info( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)]; size_t matches_count; size_t list_size; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } matches_count = MCDI_OUT_DWORD(req, GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES); if (req.emr_out_length_used < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(matches_count)) { rc = EMSGSIZE; goto fail2; } *list_lengthp = 
matches_count; if (buffer_length < matches_count) { rc = ENOSPC; goto fail3; } /* * Check that the elements in the list in the MCDI response are the size * we expect, so we can just copy them directly. Any conversion of the * flags is handled by the caller. */ EFX_STATIC_ASSERT(sizeof (uint32_t) == MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN); list_size = matches_count * MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN; memcpy(buffer, MCDI_OUT2(req, uint32_t, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES), list_size); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_filter_supported_filters( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp) { size_t mcdi_list_length; size_t list_length; uint32_t i; efx_rc_t rc; uint32_t all_filter_flags = (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT | EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID | EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_UNKNOWN_MCAST_DST | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST); rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, &mcdi_list_length); if (rc != 0) { if (rc == ENOSPC) { /* Pass through mcdi_list_length for the list length */ *list_lengthp = mcdi_list_length; } goto fail1; } /* * The static assertions in ef10_filter_init() ensure that the values of * the EFX_FILTER_MATCH flags match those used by MCDI, so they don't * need to be converted. * * In case support is added to MCDI for additional flags, remove any * matches from the list which include flags we don't support. The order * of the matches is preserved as they are ordered from highest to * lowest priority. 
*/ EFSYS_ASSERT(mcdi_list_length <= buffer_length); list_length = 0; for (i = 0; i < mcdi_list_length; i++) { if ((buffer[i] & ~all_filter_flags) == 0) { buffer[list_length] = buffer[i]; list_length++; } } *list_lengthp = list_length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_unicast( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *addr, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the filter for the local station address */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); if (rc != 0) goto fail1; eftp->eft_unicst_filter_count++; EFSYS_ASSERT(eftp->eft_unicst_filter_count <= EFX_EF10_FILTER_UNICAST_FILTERS_MAX); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_all_unicast( __in efx_nic_t *enp, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the unknown unicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_uc_def(&spec); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); if (rc != 0) goto fail1; eftp->eft_unicst_filter_count++; EFSYS_ASSERT(eftp->eft_unicst_filter_count <= EFX_EF10_FILTER_UNICAST_FILTERS_MAX); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_multicast_list( __in efx_nic_t *enp, __in boolean_t mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count, __in efx_filter_flags_t filter_flags, __in boolean_t rollback) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; uint8_t addr[6]; uint32_t i; uint32_t filter_index; uint32_t filter_count; efx_rc_t rc; if (mulcst == B_FALSE) count = 0; if (count + (brdcst ? 
1 : 0) > EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) { /* Too many MAC addresses */ rc = EINVAL; goto fail1; } /* Insert/renew multicast address list filters */ filter_count = 0; for (i = 0; i < count; i++) { efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, &addrs[i * EFX_MAC_ADDR_LEN]); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &filter_index); if (rc == 0) { eftp->eft_mulcst_filter_indexes[filter_count] = filter_index; filter_count++; } else if (rollback == B_TRUE) { /* Only stop upon failure if told to rollback */ goto rollback; } } if (brdcst == B_TRUE) { /* Insert/renew broadcast address filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); EFX_MAC_BROADCAST_ADDR_SET(addr); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &filter_index); if (rc == 0) { eftp->eft_mulcst_filter_indexes[filter_count] = filter_index; filter_count++; } else if (rollback == B_TRUE) { /* Only stop upon failure if told to rollback */ goto rollback; } } eftp->eft_mulcst_filter_count = filter_count; eftp->eft_using_all_mulcst = B_FALSE; return (0); rollback: /* Remove any filters we have inserted */ i = filter_count; while (i--) { (void) ef10_filter_delete_internal(enp, eftp->eft_mulcst_filter_indexes[i]); } eftp->eft_mulcst_filter_count = 0; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_all_multicast( __in efx_nic_t *enp, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the unknown multicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_mc_def(&spec); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_mulcst_filter_indexes[0]); if (rc != 0) goto fail1; eftp->eft_mulcst_filter_count = 1; eftp->eft_using_all_mulcst = B_TRUE; /* * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic. 
*/ return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } +typedef struct ef10_filter_encap_entry_s { + uint16_t ether_type; + efx_tunnel_protocol_t encap_type; + uint32_t inner_frame_match; +} ef10_filter_encap_entry_t; + +#define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \ + { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \ + EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_##inner_frame_match } + +static ef10_filter_encap_entry_t ef10_filter_encap_list[] = { + EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, MCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, MCAST_DST), + + EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, MCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, MCAST_DST), + + EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, MCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, MCAST_DST), +}; + +#undef EF10_ENCAP_FILTER_ENTRY + +static __checkReturn efx_rc_t +ef10_filter_insert_encap_filters( + __in efx_nic_t *enp, + __in boolean_t mulcst, + __in efx_filter_flags_t filter_flags) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + uint32_t i; + efx_rc_t rc; + + EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(ef10_filter_encap_list) <= + EFX_ARRAY_SIZE(table->eft_encap_filter_indexes)); + + /* + * On Medford, full-featured firmware can identify packets as being + * tunnel encapsulated, even if no encapsulated packet offloads are in + * use. When packets are identified as such, ordinary filters are not + * applied, only ones specific to encapsulated packets. Hence we need to + * insert filters for encapsulated packets in order to receive them. + * + * Separate filters need to be inserted for each ether type, + * encapsulation type, and inner frame type (unicast or multicast). To + * keep things simple and reduce the number of filters needed, catch-all + * filters for all combinations of types are inserted, even if + * all_unicst or all_mulcst have not been set. (These catch-all filters + * may well, however, fail to insert on unprivileged functions.) + */ + table->eft_encap_filter_count = 0; + for (i = 0; i < EFX_ARRAY_SIZE(ef10_filter_encap_list); i++) { + efx_filter_spec_t spec; + ef10_filter_encap_entry_t *encap_filter = + &ef10_filter_encap_list[i]; + + /* + * Skip multicast filters if we've not been asked for + * any multicast traffic. 
+ */ + if ((mulcst == B_FALSE) && + (encap_filter->inner_frame_match == + EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST)) + continue; + + efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, + filter_flags, + table->eft_default_rxq); + efx_filter_spec_set_ether_type(&spec, encap_filter->ether_type); + rc = efx_filter_spec_set_encap_type(&spec, + encap_filter->encap_type, + encap_filter->inner_frame_match); + if (rc != 0) + goto fail1; + + rc = ef10_filter_add_internal(enp, &spec, B_TRUE, + &table->eft_encap_filter_indexes[ + table->eft_encap_filter_count]); + if (rc != 0) { + if (rc != EACCES) + goto fail2; + } else { + table->eft_encap_filter_count++; + } + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + static void ef10_filter_remove_old( __in efx_nic_t *enp) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; uint32_t i; for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { if (ef10_filter_entry_is_auto_old(table, i)) { (void) ef10_filter_delete_internal(enp, i); } } } static __checkReturn efx_rc_t ef10_filter_get_workarounds( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; uint32_t implemented = 0; uint32_t enabled = 0; efx_rc_t rc; rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled); if (rc == 0) { /* Check if chained multicast filter support is enabled */ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807) encp->enc_bug26807_workaround = B_TRUE; else encp->enc_bug26807_workaround = B_FALSE; } else if (rc == ENOTSUP) { /* * Firmware is too old to support GET_WORKAROUNDS, and support * for this workaround was implemented later. */ encp->enc_bug26807_workaround = B_FALSE; } else { goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Reconfigure all filters. * If all_unicst and/or all mulcst filters cannot be applied then * return ENOTSUP (Note the filters for the specified addresses are * still applied in this case). */ __checkReturn efx_rc_t ef10_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_flags_t filter_flags; unsigned int i; efx_rc_t all_unicst_rc = 0; efx_rc_t all_mulcst_rc = 0; efx_rc_t rc; if (table->eft_default_rxq == NULL) { /* * Filters direct traffic to the default RXQ, and so cannot be * inserted until it is available. Any currently configured * filters must be removed (ignore errors in case the MC * has rebooted, which removes hardware filters). 
for (i = 0; i < table->eft_unicst_filter_count; i++) { (void) ef10_filter_delete_internal(enp, table->eft_unicst_filter_indexes[i]); } table->eft_unicst_filter_count = 0; for (i = 0; i < table->eft_mulcst_filter_count; i++) { (void) ef10_filter_delete_internal(enp, table->eft_mulcst_filter_indexes[i]); } table->eft_mulcst_filter_count = 0; + for (i = 0; i < table->eft_encap_filter_count; i++) { + (void) ef10_filter_delete_internal(enp, + table->eft_encap_filter_indexes[i]); + } + table->eft_encap_filter_count = 0; + return (0); } if (table->eft_using_rss) filter_flags = EFX_FILTER_FLAG_RX_RSS; else filter_flags = 0; /* Mark old filters which may need to be removed */ for (i = 0; i < table->eft_unicst_filter_count; i++) { ef10_filter_set_entry_auto_old(table, table->eft_unicst_filter_indexes[i]); } for (i = 0; i < table->eft_mulcst_filter_count; i++) { ef10_filter_set_entry_auto_old(table, table->eft_mulcst_filter_indexes[i]); } + for (i = 0; i < table->eft_encap_filter_count; i++) { + ef10_filter_set_entry_auto_old(table, + table->eft_encap_filter_indexes[i]); + } /* * Insert or renew unicast filters. * * Firmware does not perform chaining on unicast filters. As traffic is * therefore only delivered to the first matching filter, we should * always insert the specific filter for our MAC address, to try and * ensure we get that traffic. * * (If the filter for our MAC address has already been inserted by * another function, we won't receive traffic sent to us, even if we * insert a unicast mismatch filter. To prevent traffic stealing, this * therefore relies on the privilege model only allowing functions to * insert filters for their own MAC address unless explicitly given * additional privileges by the user. This also means that, even on a * privileged function, inserting a unicast mismatch filter may not * catch all traffic in multi PCI function scenarios.) */ table->eft_unicst_filter_count = 0; rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags); if (all_unicst || (rc != 0)) { all_unicst_rc = ef10_filter_insert_all_unicast(enp, filter_flags); if ((rc != 0) && (all_unicst_rc != 0)) goto fail1; } /* * WORKAROUND_BUG26807 controls firmware support for chained multicast * filters, and can only be enabled or disabled when the hardware filter * table is empty. * * Chained multicast filters require support from the datapath firmware, * and may not be available (e.g. low-latency variants or old Huntington * firmware). * * Firmware will reset (FLR) functions which have inserted filters in * the hardware filter table when the workaround is enabled/disabled. * Functions without any hardware filters are not reset. * * Re-check if the workaround is enabled after adding unicast hardware * filters. This ensures that encp->enc_bug26807_workaround matches the * firmware state, and that later changes to enable/disable the * workaround will result in this function seeing a reset (FLR). * * In common-code drivers, we only support multiple PCI function * scenarios with firmware that supports multicast chaining, so we can * assume it is enabled for such cases and hence simplify the filter * insertion logic. Firmware that does not support multicast chaining * does not support multiple PCI function configurations either, so * filter insertion is much simpler and the same strategies can still be * used.
*/ if ((rc = ef10_filter_get_workarounds(enp)) != 0) goto fail2; if ((table->eft_using_all_mulcst != all_mulcst) && (encp->enc_bug26807_workaround == B_TRUE)) { /* * Multicast filter chaining is enabled, so traffic that matches * more than one multicast filter will be replicated and * delivered to multiple recipients. To avoid this duplicate * delivery, remove old multicast filters before inserting new * multicast filters. */ ef10_filter_remove_old(enp); } /* Insert or renew multicast filters */ if (all_mulcst == B_TRUE) { /* * Insert the all multicast filter. If that fails, try to insert * all of our multicast filters (but without rollback on * failure). */ all_mulcst_rc = ef10_filter_insert_all_multicast(enp, filter_flags); if (all_mulcst_rc != 0) { rc = ef10_filter_insert_multicast_list(enp, B_TRUE, brdcst, addrs, count, filter_flags, B_FALSE); if (rc != 0) goto fail3; } } else { /* * Insert filters for multicast addresses. * If any insertion fails, then rollback and try to insert the * all multicast filter instead. * If that also fails, try to insert all of the multicast * filters (but without rollback on failure). */ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, addrs, count, filter_flags, B_TRUE); if (rc != 0) { if ((table->eft_using_all_mulcst == B_FALSE) && (encp->enc_bug26807_workaround == B_TRUE)) { /* * Multicast filter chaining is on, so remove * old filters before inserting the multicast * all filter to avoid duplicate delivery caused * by packets matching multiple filters. */ ef10_filter_remove_old(enp); } rc = ef10_filter_insert_all_multicast(enp, filter_flags); if (rc != 0) { rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, addrs, count, filter_flags, B_FALSE); if (rc != 0) goto fail4; } } + } + + if (encp->enc_tunnel_encapsulations_supported != 0) { + /* Try to insert filters for encapsulated packets. 
*/ + (void) ef10_filter_insert_encap_filters(enp, + mulcst || all_mulcst || brdcst, + filter_flags); } /* Remove old filters which were not renewed */ ef10_filter_remove_old(enp); /* report if any optional flags were rejected */ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) || ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) { rc = ENOTSUP; } return (rc); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Clear auto old flags */ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { if (ef10_filter_entry_is_auto_old(table, i)) { ef10_filter_set_entry_not_auto_old(table, i); } } return (rc); } void ef10_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; *erpp = table->eft_default_rxq; *using_rss = table->eft_using_rss; } void ef10_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; #if EFSYS_OPT_RX_SCALE EFSYS_ASSERT((using_rss == B_FALSE) || (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID)); table->eft_using_rss = using_rss; #else EFSYS_ASSERT(using_rss == B_FALSE); table->eft_using_rss = B_FALSE; #endif table->eft_default_rxq = erp; } void ef10_filter_default_rxq_clear( __in efx_nic_t *enp) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; table->eft_default_rxq = NULL; table->eft_using_rss = B_FALSE; } #endif /* EFSYS_OPT_FILTER */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: stable/10/sys/dev/sfxge/common/ef10_impl.h =================================================================== --- stable/10/sys/dev/sfxge/common/ef10_impl.h (revision 342486) +++ stable/10/sys/dev/sfxge/common/ef10_impl.h (revision 342487) @@ -1,1139 +1,1149 @@ /*- * Copyright (c) 2015-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
* * $FreeBSD$ */ #ifndef _SYS_EF10_IMPL_H #define _SYS_EF10_IMPL_H #ifdef __cplusplus extern "C" { #endif #if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD) #define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS) #elif EFSYS_OPT_HUNTINGTON #define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS #elif EFSYS_OPT_MEDFORD #define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS #endif /* * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could * possibly be increased, or the write size reported by newer firmware used * instead. */ #define EF10_NVRAM_CHUNK 0x80 /* Alignment requirement for value written to RX WPTR: * the WPTR must be aligned to an 8 descriptor boundary */ #define EF10_RX_WPTR_ALIGN 8 /* * Max byte offset into the packet the TCP header must start for the hardware * to be able to parse the packet correctly. */ #define EF10_TCP_HEADER_OFFSET_LIMIT 208 /* Invalid RSS context handle */ #define EF10_RSS_CONTEXT_INVALID (0xffffffff) /* EV */ __checkReturn efx_rc_t ef10_ev_init( __in efx_nic_t *enp); void ef10_ev_fini( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __in efx_evq_t *eep); void ef10_ev_qdestroy( __in efx_evq_t *eep); __checkReturn efx_rc_t ef10_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); void ef10_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); __checkReturn efx_rc_t ef10_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); #if EFSYS_OPT_QSTATS void ef10_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ void ef10_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label); void ef10_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label); /* INTR */ __checkReturn efx_rc_t ef10_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp); void ef10_intr_enable( __in efx_nic_t *enp); void ef10_intr_disable( __in efx_nic_t *enp); void ef10_intr_disable_unlocked( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_intr_trigger( __in efx_nic_t *enp, __in unsigned int level); void ef10_intr_status_line( __in efx_nic_t *enp, __out boolean_t *fatalp, __out uint32_t *qmaskp); void ef10_intr_status_message( __in efx_nic_t *enp, __in unsigned int message, __out boolean_t *fatalp); void ef10_intr_fatal( __in efx_nic_t *enp); void ef10_intr_fini( __in efx_nic_t *enp); /* NIC */ extern __checkReturn efx_rc_t ef10_nic_probe( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp); extern __checkReturn efx_rc_t ef10_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *vi_countp); extern __checkReturn efx_rc_t ef10_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nic_reset( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_nic_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t ef10_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void ef10_nic_fini( __in efx_nic_t *enp); extern void ef10_nic_unprobe( __in efx_nic_t *enp); /* MAC */ extern __checkReturn efx_rc_t ef10_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep); extern __checkReturn efx_rc_t ef10_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); extern 
__checkReturn efx_rc_t ef10_mac_addr_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_pdu_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_pdu_get( __in efx_nic_t *enp, __out size_t *pdu); extern __checkReturn efx_rc_t ef10_mac_reconfigure( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_multicast_list_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void ef10_mac_filter_default_rxq_clear( __in efx_nic_t *enp); #if EFSYS_OPT_LOOPBACK extern __checkReturn efx_rc_t ef10_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type); #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS extern __checkReturn efx_rc_t ef10_mac_stats_get_mask( __in efx_nic_t *enp, __inout_bcount(mask_size) uint32_t *maskp, __in size_t mask_size); extern __checkReturn efx_rc_t ef10_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ /* MCDI */ #if EFSYS_OPT_MCDI extern __checkReturn efx_rc_t ef10_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); extern void ef10_mcdi_fini( __in efx_nic_t *enp); extern void ef10_mcdi_send_request( __in efx_nic_t *enp, __in_bcount(hdr_len) void *hdrp, __in size_t hdr_len, __in_bcount(sdu_len) void *sdup, __in size_t sdu_len); extern __checkReturn boolean_t ef10_mcdi_poll_response( __in efx_nic_t *enp); extern void ef10_mcdi_read_response( __in efx_nic_t *enp, __out_bcount(length) void *bufferp, __in size_t offset, __in size_t length); extern efx_rc_t ef10_mcdi_poll_reboot( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mcdi_feature_supported( __in efx_nic_t *enp, __in efx_mcdi_feature_id_t id, __out boolean_t *supportedp); extern void ef10_mcdi_get_timeout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __out uint32_t *timeoutp); #endif /* EFSYS_OPT_MCDI */ /* NVRAM */ #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD extern __checkReturn efx_rc_t ef10_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_buf_write_tlv( __inout_bcount(partn_size) caddr_t partn_data, __in size_t partn_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp); extern __checkReturn efx_rc_t ef10_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_write_segment_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t all_segments); extern __checkReturn efx_rc_t ef10_nvram_partn_lock( __in efx_nic_t *enp, __in uint32_t partn); extern __checkReturn efx_rc_t ef10_nvram_partn_unlock( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *resultp); #endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ #if EFSYS_OPT_NVRAM #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t ef10_nvram_test( __in efx_nic_t *enp); #endif /* 
EFSYS_OPT_DIAG */ extern __checkReturn efx_rc_t ef10_nvram_type_to_partn( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *partnp); extern __checkReturn efx_rc_t ef10_nvram_partn_size( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_rw_start( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *chunk_sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_read_mode( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size, __in uint32_t mode); extern __checkReturn efx_rc_t ef10_nvram_partn_read( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_erase( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_write( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_rw_finish( __in efx_nic_t *enp, __in uint32_t partn); extern __checkReturn efx_rc_t ef10_nvram_partn_get_version( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t ef10_nvram_partn_set_version( __in efx_nic_t *enp, __in uint32_t partn, __in_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t ef10_nvram_buffer_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size); extern __checkReturn efx_rc_t ef10_nvram_buffer_create( __in efx_nic_t *enp, __in uint16_t partn_type, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size); extern __checkReturn efx_rc_t ef10_nvram_buffer_find_item_start( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp ); extern __checkReturn efx_rc_t ef10_nvram_buffer_find_end( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp ); extern __checkReturn __success(return != B_FALSE) boolean_t ef10_nvram_buffer_find_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp ); extern __checkReturn efx_rc_t ef10_nvram_buffer_get_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(item_max_size, *lengthp) caddr_t itemp, __in size_t item_max_size, __out uint32_t *lengthp ); extern __checkReturn efx_rc_t ef10_nvram_buffer_insert_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp ); extern __checkReturn efx_rc_t ef10_nvram_buffer_delete_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end ); extern __checkReturn efx_rc_t ef10_nvram_buffer_finish( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ); #endif /* EFSYS_OPT_NVRAM */ /* PHY */ typedef struct ef10_link_state_s { uint32_t els_adv_cap_mask; uint32_t els_lp_cap_mask; unsigned int els_fcntl; efx_link_mode_t els_link_mode; #if EFSYS_OPT_LOOPBACK efx_loopback_type_t els_loopback; #endif boolean_t els_mac_up; } ef10_link_state_t; extern void ef10_phy_link_ev( __in efx_nic_t *enp, 
__in efx_qword_t *eqp, __out efx_link_mode_t *link_modep); extern __checkReturn efx_rc_t ef10_phy_get_link( __in efx_nic_t *enp, __out ef10_link_state_t *elsp); extern __checkReturn efx_rc_t ef10_phy_power( __in efx_nic_t *enp, __in boolean_t on); extern __checkReturn efx_rc_t ef10_phy_reconfigure( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_phy_verify( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); #if EFSYS_OPT_PHY_STATS extern __checkReturn efx_rc_t ef10_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST extern __checkReturn efx_rc_t ef10_bist_enable_offline( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); extern __checkReturn efx_rc_t ef10_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count); extern void ef10_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ /* TX */ extern __checkReturn efx_rc_t ef10_tx_init( __in efx_nic_t *enp); extern void ef10_tx_fini( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp); extern void ef10_tx_qdestroy( __in efx_txq_t *etp); extern __checkReturn efx_rc_t ef10_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void ef10_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); extern __checkReturn efx_rc_t ef10_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); extern __checkReturn efx_rc_t ef10_tx_qflush( __in efx_txq_t *etp); extern void ef10_tx_qenable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t ef10_tx_qpio_enable( __in efx_txq_t *etp); extern void ef10_tx_qpio_disable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t ef10_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset); extern __checkReturn efx_rc_t ef10_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp); extern __checkReturn efx_rc_t ef10_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void ef10_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); extern void ef10_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp); extern void ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, __in int count); extern void ef10_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t vlan_tci, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS extern void ef10_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t 
*stat); #endif /* EFSYS_OPT_QSTATS */ typedef uint32_t efx_piobuf_handle_t; #define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1) extern __checkReturn efx_rc_t ef10_nic_pio_alloc( __inout efx_nic_t *enp, __out uint32_t *bufnump, __out efx_piobuf_handle_t *handlep, __out uint32_t *blknump, __out uint32_t *offsetp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nic_pio_free( __inout efx_nic_t *enp, __in uint32_t bufnum, __in uint32_t blknum); extern __checkReturn efx_rc_t ef10_nic_pio_link( __inout efx_nic_t *enp, __in uint32_t vi_index, __in efx_piobuf_handle_t handle); extern __checkReturn efx_rc_t ef10_nic_pio_unlink( __inout efx_nic_t *enp, __in uint32_t vi_index); /* VPD */ #if EFSYS_OPT_VPD extern __checkReturn efx_rc_t ef10_vpd_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t ef10_vpd_set( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t ef10_vpd_next( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); extern __checkReturn efx_rc_t ef10_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void ef10_vpd_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_VPD */ /* RX */ extern __checkReturn efx_rc_t ef10_rx_init( __in efx_nic_t *enp); #if EFSYS_OPT_RX_SCATTER extern __checkReturn efx_rc_t ef10_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE extern __checkReturn efx_rc_t ef10_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); extern __checkReturn efx_rc_t ef10_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n); extern __checkReturn efx_rc_t ef10_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n); extern __checkReturn uint32_t ef10_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer); #endif /* EFSYS_OPT_RX_SCALE */ extern __checkReturn efx_rc_t ef10_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp); extern void ef10_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added); extern void ef10_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); extern __checkReturn efx_rc_t ef10_rx_qflush( __in efx_rxq_t *erp); extern void ef10_rx_qenable( __in efx_rxq_t *erp); extern __checkReturn efx_rc_t ef10_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp); extern void ef10_rx_qdestroy( __in efx_rxq_t *erp); extern void 
ef10_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_FILTER typedef struct ef10_filter_handle_s { uint32_t efh_lo; uint32_t efh_hi; } ef10_filter_handle_t; typedef struct ef10_filter_entry_s { uintptr_t efe_spec; /* pointer to filter spec plus busy bit */ ef10_filter_handle_t efe_handle; } ef10_filter_entry_t; /* * BUSY flag indicates that an update is in progress. * AUTO_OLD flag is used to mark and sweep MAC packet filters. */ #define EFX_EF10_FILTER_FLAG_BUSY 1U #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U #define EFX_EF10_FILTER_FLAGS 3U /* * Size of the hash table used by the driver. Doesn't need to be the * same size as the hardware's table. */ #define EFX_EF10_FILTER_TBL_ROWS 8192 /* Only need to allow for one directed and one unknown unicast filter */ #define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2 /* Allow for the broadcast address to be added to the multicast list */ #define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1) +/* + * For encapsulated packets, there is one filter for each combination of + * IPv4 or IPv6 outer frame, VXLAN, GENEVE or NVGRE packet type, and unicast or + * multicast inner frames. + */ +#define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12 + typedef struct ef10_filter_table_s { ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS]; efx_rxq_t *eft_default_rxq; boolean_t eft_using_rss; uint32_t eft_unicst_filter_indexes[ EFX_EF10_FILTER_UNICAST_FILTERS_MAX]; uint32_t eft_unicst_filter_count; uint32_t eft_mulcst_filter_indexes[ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX]; uint32_t eft_mulcst_filter_count; boolean_t eft_using_all_mulcst; + uint32_t eft_encap_filter_indexes[ + EFX_EF10_FILTER_ENCAP_FILTERS_MAX]; + uint32_t eft_encap_filter_count; } ef10_filter_table_t; __checkReturn efx_rc_t ef10_filter_init( __in efx_nic_t *enp); void ef10_filter_fini( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_filter_restore( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace); __checkReturn efx_rc_t ef10_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t ef10_filter_supported_filters( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp); extern __checkReturn efx_rc_t ef10_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count); extern void ef10_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss); extern void ef10_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void ef10_filter_default_rxq_clear( __in efx_nic_t *enp); #endif /* EFSYS_OPT_FILTER */ extern __checkReturn efx_rc_t efx_mcdi_get_function_info( __in efx_nic_t *enp, __out uint32_t *pfp, __out_opt uint32_t *vfp); extern __checkReturn efx_rc_t efx_mcdi_privilege_mask( __in efx_nic_t *enp, __in uint32_t pf, __in uint32_t vf, __out uint32_t *maskp); extern __checkReturn efx_rc_t efx_mcdi_get_port_assignment( __in efx_nic_t *enp, __out uint32_t *portp); extern __checkReturn efx_rc_t efx_mcdi_get_port_modes( __in efx_nic_t *enp, __out uint32_t *modesp, __out_opt uint32_t *current_modep); extern __checkReturn efx_rc_t ef10_nic_get_port_mode_bandwidth( __in uint32_t port_mode, __out uint32_t *bandwidth_mbpsp); extern
__checkReturn efx_rc_t efx_mcdi_get_mac_address_pf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]); extern __checkReturn efx_rc_t efx_mcdi_get_mac_address_vf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]); extern __checkReturn efx_rc_t efx_mcdi_get_clock( __in efx_nic_t *enp, __out uint32_t *sys_freqp, __out uint32_t *dpcpu_freqp); extern __checkReturn efx_rc_t efx_mcdi_get_vector_cfg( __in efx_nic_t *enp, __out_opt uint32_t *vec_basep, __out_opt uint32_t *pf_nvecp, __out_opt uint32_t *vf_nvecp); extern __checkReturn efx_rc_t ef10_get_datapath_caps( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_get_privilege_mask( __in efx_nic_t *enp, __out uint32_t *maskp); extern __checkReturn efx_rc_t ef10_external_port_mapping( __in efx_nic_t *enp, __in uint32_t port, __out uint8_t *external_portp); #ifdef __cplusplus } #endif #endif /* _SYS_EF10_IMPL_H */ Index: stable/10 =================================================================== --- stable/10 (revision 342486) +++ stable/10 (revision 342487) Property changes on: stable/10 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r340804
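Among the changes brought in by this merge (head r340804) are the encapsulated-packet filter bookkeeping fields shown above: the EFX_EF10_FILTER_ENCAP_FILTERS_MAX limit and the eft_encap_filter_indexes/eft_encap_filter_count members of ef10_filter_table_t. The limit of 12 is simply the product of the combinations listed in its comment: two outer frame types (IPv4, IPv6), three encapsulation types (VXLAN, GENEVE, NVGRE) and two inner frame classes (unicast, multicast). The minimal standalone sketch below only checks that arithmetic; apart from the value it shares with EFX_EF10_FILTER_ENCAP_FILTERS_MAX, every name in it is hypothetical and it is not part of the patch.

/*
 * Illustrative consistency check only; not part of the driver sources.
 */
#include <assert.h>

enum { OUTER_IPV4, OUTER_IPV6, N_OUTER_FRAMES };		/* IPv4 or IPv6 outer frame */
enum { ENCAP_VXLAN, ENCAP_GENEVE, ENCAP_NVGRE, N_ENCAP_TYPES };	/* tunnel packet types */
enum { INNER_UCAST, INNER_MCAST, N_INNER_CLASSES };		/* inner frame classes */

#define ENCAP_FILTERS_MAX	(N_OUTER_FRAMES * N_ENCAP_TYPES * N_INNER_CLASSES)

int
main(void)
{
	/* 2 * 3 * 2 == 12, matching EFX_EF10_FILTER_ENCAP_FILTERS_MAX. */
	assert(ENCAP_FILTERS_MAX == 12);
	return (0);
}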
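The filter definitions above also note that the AUTO_OLD flag is used to mark and sweep MAC packet filters: judging from that comment, automatically installed filters are first marked old, the filters that are still wanted are re-inserted (clearing the mark), and anything left marked is then removed. The standalone sketch below illustrates that general mark-and-sweep pattern only; it uses a plain boolean per entry rather than the driver's scheme of packing EFX_EF10_FILTER_FLAG_AUTO_OLD into the low bits of efe_spec, and all of its names (auto_filter, mark_all_old, keep, sweep) are hypothetical.

/*
 * Generic mark-and-sweep illustration; not the driver's implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct auto_filter {
	bool installed;
	bool auto_old;		/* analogous to EFX_EF10_FILTER_FLAG_AUTO_OLD */
};

static void
mark_all_old(struct auto_filter *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].installed)
			tbl[i].auto_old = true;
}

static void
keep(struct auto_filter *f)
{
	/* Re-inserting an identical filter clears the mark. */
	f->auto_old = false;
}

static void
sweep(struct auto_filter *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].installed && tbl[i].auto_old)
			tbl[i].installed = false;	/* stale: remove it */
}

int
main(void)
{
	struct auto_filter tbl[3] = {
		{ true, false }, { true, false }, { true, false }
	};

	mark_all_old(tbl, 3);	/* step 1: mark every installed auto filter */
	keep(&tbl[1]);		/* step 2: only the second address is still wanted */
	sweep(tbl, 3);		/* step 3: remove filters nobody re-requested */

	for (size_t i = 0; i < 3; i++)
		printf("filter %zu: %s\n", i, tbl[i].installed ? "kept" : "removed");
	return (0);
}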