Index: stable/10/sys/dev/sfxge/common/efsys.h
===================================================================
--- stable/10/sys/dev/sfxge/common/efsys.h	(revision 293926)
+++ stable/10/sys/dev/sfxge/common/efsys.h	(revision 293927)
@@ -1,1227 +1,1228 @@
/*-
 * Copyright (c) 2010-2015 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
* * $FreeBSD$ */ #ifndef _SYS_EFSYS_H #define _SYS_EFSYS_H #ifdef __cplusplus extern "C" { #endif #include #include #include #include #include #include #include #include #include #include #include #include #define EFSYS_HAS_UINT64 1 #if defined(__x86_64__) #define EFSYS_USE_UINT64 1 #else #define EFSYS_USE_UINT64 0 #endif #define EFSYS_HAS_SSE2_M128 0 #if _BYTE_ORDER == _BIG_ENDIAN #define EFSYS_IS_BIG_ENDIAN 1 #define EFSYS_IS_LITTLE_ENDIAN 0 #elif _BYTE_ORDER == _LITTLE_ENDIAN #define EFSYS_IS_BIG_ENDIAN 0 #define EFSYS_IS_LITTLE_ENDIAN 1 #endif #include "efx_types.h" /* Common code requires this */ #if __FreeBSD_version < 800068 #define memmove(d, s, l) bcopy(s, d, l) #endif /* FreeBSD equivalents of Solaris things */ #ifndef _NOTE #define _NOTE(s) #endif #ifndef B_FALSE #define B_FALSE FALSE #endif #ifndef B_TRUE #define B_TRUE TRUE #endif #ifndef IS_P2ALIGNED #define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0) #endif #ifndef P2ROUNDUP #define P2ROUNDUP(x, align) (-(-(x) & -(align))) #endif #ifndef P2ALIGN #define P2ALIGN(_x, _a) ((_x) & -(_a)) #endif #ifndef IS2P #define ISP2(x) (((x) & ((x) - 1)) == 0) #endif #if defined(__x86_64__) && __FreeBSD_version >= 1000000 #define SFXGE_USE_BUS_SPACE_8 1 #if !defined(bus_space_read_stream_8) #define bus_space_read_stream_8(t, h, o) \ bus_space_read_8((t), (h), (o)) #define bus_space_write_stream_8(t, h, o, v) \ bus_space_write_8((t), (h), (o), (v)) #endif #endif #define ENOTACTIVE EINVAL /* Memory type to use on FreeBSD */ MALLOC_DECLARE(M_SFXGE); /* Machine dependend prefetch wrappers */ #if defined(__i386__) || defined(__amd64__) static __inline void prefetch_read_many(void *addr) { __asm__( "prefetcht0 (%0)" : : "r" (addr)); } static __inline void prefetch_read_once(void *addr) { __asm__( "prefetchnta (%0)" : : "r" (addr)); } #elif defined(__sparc64__) static __inline void prefetch_read_many(void *addr) { __asm__( "prefetch [%0], 0" : : "r" (addr)); } static __inline void prefetch_read_once(void *addr) { __asm__( "prefetch [%0], 1" : : "r" (addr)); } #else static __inline void prefetch_read_many(void *addr) { } static __inline void prefetch_read_once(void *addr) { } #endif #if defined(__i386__) || defined(__amd64__) #include #include #endif static __inline void sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *seg) { #if defined(__i386__) || defined(__amd64__) seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t)); seg->ds_len = m->m_len; #else int nsegstmp; bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0); #endif } /* Modifiers used for Windows builds */ #define __in #define __in_opt #define __in_ecount(_n) #define __in_ecount_opt(_n) #define __in_bcount(_n) #define __in_bcount_opt(_n) #define __out #define __out_opt #define __out_ecount(_n) #define __out_ecount_opt(_n) #define __out_bcount(_n) #define __out_bcount_opt(_n) #define __deref_out #define __inout #define __inout_opt #define __inout_ecount(_n) #define __inout_ecount_opt(_n) #define __inout_bcount(_n) #define __inout_bcount_opt(_n) #define __inout_bcount_full_opt(_n) #define __deref_out_bcount_opt(n) #define __checkReturn +#define __success(_x) #define __drv_when(_p, _c) /* Code inclusion options */ #define EFSYS_OPT_NAMES 1 #define EFSYS_OPT_FALCON 0 #define EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE 0 #define EFSYS_OPT_SIENA 1 #define EFSYS_OPT_HUNTINGTON 1 #ifdef DEBUG #define EFSYS_OPT_CHECK_REG 1 #else #define EFSYS_OPT_CHECK_REG 0 #endif #define EFSYS_OPT_MCDI 1 #define EFSYS_OPT_MAC_FALCON_GMAC 0 #define 
EFSYS_OPT_MAC_FALCON_XMAC 0 #define EFSYS_OPT_MAC_STATS 1 #define EFSYS_OPT_LOOPBACK 0 #define EFSYS_OPT_MON_NULL 0 #define EFSYS_OPT_MON_LM87 0 #define EFSYS_OPT_MON_MAX6647 0 #define EFSYS_OPT_MON_MCDI 0 #define EFSYS_OPT_MON_STATS 0 #define EFSYS_OPT_PHY_NULL 0 #define EFSYS_OPT_PHY_QT2022C2 0 #define EFSYS_OPT_PHY_SFX7101 0 #define EFSYS_OPT_PHY_TXC43128 0 #define EFSYS_OPT_PHY_SFT9001 0 #define EFSYS_OPT_PHY_QT2025C 0 #define EFSYS_OPT_PHY_STATS 1 #define EFSYS_OPT_PHY_PROPS 0 #define EFSYS_OPT_PHY_BIST 0 #define EFSYS_OPT_BIST 1 #define EFSYS_OPT_PHY_LED_CONTROL 1 #define EFSYS_OPT_PHY_FLAGS 0 #define EFSYS_OPT_VPD 1 #define EFSYS_OPT_NVRAM 1 #define EFSYS_OPT_NVRAM_FALCON_BOOTROM 0 #define EFSYS_OPT_NVRAM_SFT9001 0 #define EFSYS_OPT_NVRAM_SFX7101 0 #define EFSYS_OPT_BOOTCFG 0 #define EFSYS_OPT_PCIE_TUNE 0 #define EFSYS_OPT_DIAG 0 #define EFSYS_OPT_WOL 1 #define EFSYS_OPT_RX_SCALE 1 #define EFSYS_OPT_QSTATS 1 #define EFSYS_OPT_FILTER 1 #define EFSYS_OPT_MCAST_FILTER_LIST 1 #define EFSYS_OPT_RX_SCATTER 0 #define EFSYS_OPT_RX_HDR_SPLIT 0 #define EFSYS_OPT_EV_PREFETCH 0 #define EFSYS_OPT_DECODE_INTR_FATAL 1 /* ID */ typedef struct __efsys_identifier_s efsys_identifier_t; /* PROBE */ #ifndef KDTRACE_HOOKS #define EFSYS_PROBE(_name) #define EFSYS_PROBE1(_name, _type1, _arg1) #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3) #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4) #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6, _type7, _arg7) #else /* KDTRACE_HOOKS */ #define EFSYS_PROBE(_name) \ DTRACE_PROBE(_name) #define EFSYS_PROBE1(_name, _type1, _arg1) \ DTRACE_PROBE1(_name, _type1, _arg1) #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \ DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2) #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3) \ DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3) #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4) \ DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4) #ifdef DTRACE_PROBE5 #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) \ DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) #else #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) \ DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4) #endif #ifdef DTRACE_PROBE6 #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) \ DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) #else #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) \ EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) #endif #ifdef DTRACE_PROBE7 #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6, _type7, _arg7) \ 
DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6, _type7, _arg7) #else #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6, _type7, _arg7) \ EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) #endif #endif /* KDTRACE_HOOKS */ /* DMA */ typedef uint64_t efsys_dma_addr_t; typedef struct efsys_mem_s { bus_dma_tag_t esm_tag; bus_dmamap_t esm_map; caddr_t esm_base; efsys_dma_addr_t esm_addr; } efsys_mem_t; #define EFSYS_MEM_ZERO(_esmp, _size) \ do { \ (void) memset((_esmp)->esm_base, 0, (_size)); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_MEM_READD(_esmp, _offset, _edp) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ ("not power of 2 aligned")); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ (_edp)->ed_u32[0] = *addr; \ \ EFSYS_PROBE2(mem_readd, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if defined(__x86_64__) #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \ do { \ uint64_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ (_eqp)->eq_u64[0] = *addr; \ \ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ (_eqp)->eq_u32[0] = *addr++; \ (_eqp)->eq_u32[1] = *addr; \ \ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif #if defined(__x86_64__) #define EFSYS_MEM_READO(_esmp, _offset, _eop) \ do { \ uint64_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ (_eop)->eo_u64[0] = *addr++; \ (_eop)->eo_u64[1] = *addr; \ \ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFSYS_MEM_READO(_esmp, _offset, _eop) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ (_eop)->eo_u32[0] = *addr++; \ (_eop)->eo_u32[1] = *addr++; \ (_eop)->eo_u32[2] = *addr++; \ (_eop)->eo_u32[3] = *addr; \ \ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif #define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ ("not power of 2 aligned")); \ \ EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ \ addr = (void *)((_esmp)->esm_base + 
(_offset)); \ \ *addr = (_edp)->ed_u32[0]; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if defined(__x86_64__) #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \ do { \ uint64_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ *addr = (_eqp)->eq_u64[0]; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ *addr++ = (_eqp)->eq_u32[0]; \ *addr = (_eqp)->eq_u32[1]; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif #if defined(__x86_64__) #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \ do { \ uint64_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ *addr++ = (_eop)->eo_u64[0]; \ *addr = (_eop)->eo_u64[1]; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ *addr++ = (_eop)->eo_u32[0]; \ *addr++ = (_eop)->eo_u32[1]; \ *addr++ = (_eop)->eo_u32[2]; \ *addr = (_eop)->eo_u32[3]; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif #define EFSYS_MEM_ADDR(_esmp) \ ((_esmp)->esm_addr) #define EFSYS_MEM_IS_NULL(_esmp) \ ((_esmp)->esm_base == NULL) /* BAR */ #define SFXGE_LOCK_NAME_MAX 16 typedef struct efsys_bar_s { struct mtx esb_lock; char esb_lock_name[SFXGE_LOCK_NAME_MAX]; bus_space_tag_t esb_tag; bus_space_handle_t esb_handle; int esb_rid; struct resource *esb_res; } efsys_bar_t; #define SFXGE_BAR_LOCK_INIT(_esbp, _ifname) \ do { \ snprintf((_esbp)->esb_lock_name, \ sizeof((_esbp)->esb_lock_name), \ "%s:bar", (_ifname)); \ mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name, \ NULL, MTX_DEF); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define SFXGE_BAR_LOCK_DESTROY(_esbp) \ mtx_destroy(&(_esbp)->esb_lock) #define SFXGE_BAR_LOCK(_esbp) \ mtx_lock(&(_esbp)->esb_lock) #define SFXGE_BAR_UNLOCK(_esbp) \ mtx_unlock(&(_esbp)->esb_lock) #define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_LOCK(_esbp); \ \ (_edp)->ed_u32[0] = bus_space_read_stream_4( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset)); \ \ EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) 
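The EFSYS_BAR_* accessors above and below are the hooks through which the OS-independent efx code touches the memory BAR. A minimal usage sketch (not part of this change) is shown here; the helper name and register offset are hypothetical, it assumes SFXGE_BAR_LOCK_INIT() has already been run on the efsys_bar_t, and EFSYS_BAR_WRITED is the dword write accessor defined further below.

    /* Illustrative sketch only: read-modify-write of one 32-bit register. */
    static void
    example_bar_dword_rmw(efsys_bar_t *esbp)
    {
    	efx_dword_t dword;

    	/* Read a dword; passing B_TRUE takes the per-BAR mutex. */
    	EFSYS_BAR_READD(esbp, 0x100 /* hypothetical offset */, &dword, B_TRUE);

    	/* Flip a bit in the raw 32-bit value and write it back under the lock. */
    	dword.ed_u32[0] |= 0x1;
    	EFSYS_BAR_WRITED(esbp, 0x100, &dword, B_TRUE);
    }
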
#if defined(SFXGE_USE_BUS_SPACE_8) #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ SFXGE_BAR_LOCK(_esbp); \ \ (_eqp)->eq_u64[0] = bus_space_read_stream_8( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset)); \ \ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_LOCK(_esbp); \ \ (_eop)->eo_u64[0] = bus_space_read_stream_8( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset)); \ (_eop)->eo_u64[1] = bus_space_read_stream_8( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset) + 8); \ \ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ SFXGE_BAR_LOCK(_esbp); \ \ (_eqp)->eq_u32[0] = bus_space_read_stream_4( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset)); \ (_eqp)->eq_u32[1] = bus_space_read_stream_4( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset) + 4); \ \ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_LOCK(_esbp); \ \ (_eop)->eo_u32[0] = bus_space_read_stream_4( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset)); \ (_eop)->eo_u32[1] = bus_space_read_stream_4( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset) + 4); \ (_eop)->eo_u32[2] = bus_space_read_stream_4( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset) + 8); \ (_eop)->eo_u32[3] = bus_space_read_stream_4( \ (_esbp)->esb_tag, (_esbp)->esb_handle, \ (_offset) + 12); \ \ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif #define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_LOCK(_esbp); \ \ EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ \ /* \ * Make sure that previous writes to the dword have \ * been done. It should be cheaper than barrier just \ * after the write below. 
\ */ \ bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), sizeof (efx_dword_t), \ BUS_SPACE_BARRIER_WRITE); \ bus_space_write_stream_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset), (_edp)->ed_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if defined(SFXGE_USE_BUS_SPACE_8) #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ SFXGE_BAR_LOCK(_esbp); \ \ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ /* \ * Make sure that previous writes to the qword have \ * been done. It should be cheaper than barrier just \ * after the write below. \ */ \ bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), sizeof (efx_qword_t), \ BUS_SPACE_BARRIER_WRITE); \ bus_space_write_stream_8((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset), (_eqp)->eq_u64[0]); \ \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ SFXGE_BAR_LOCK(_esbp); \ \ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ /* \ * Make sure that previous writes to the qword have \ * been done. It should be cheaper than barrier just \ * after the last write below. \ */ \ bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), sizeof (efx_qword_t), \ BUS_SPACE_BARRIER_WRITE); \ bus_space_write_stream_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset), (_eqp)->eq_u32[0]); \ /* \ * It should be guaranteed that the last dword comes \ * the last, so barrier entire qword to be sure that \ * neither above nor below writes are reordered. \ */ \ bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), sizeof (efx_qword_t), \ BUS_SPACE_BARRIER_WRITE); \ bus_space_write_stream_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset) + 4, (_eqp)->eq_u32[1]); \ \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif /* * Guarantees 64bit aligned 64bit writes to write combined BAR mapping * (required by PIO hardware) */ #define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ (void) (_esbp); \ \ /* FIXME: Perform a 64-bit write */ \ KASSERT(0, ("not implemented")); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if defined(SFXGE_USE_BUS_SPACE_8) #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_LOCK(_esbp); \ \ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ /* \ * Make sure that previous writes to the oword have \ * been done. It should be cheaper than barrier just \ * after the last write below. 
\ */ \ bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), sizeof (efx_oword_t), \ BUS_SPACE_BARRIER_WRITE); \ bus_space_write_stream_8((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset), (_eop)->eo_u64[0]); \ /* \ * It should be guaranteed that the last qword comes \ * the last, so barrier entire oword to be sure that \ * neither above nor below writes are reordered. \ */ \ bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), sizeof (efx_oword_t), \ BUS_SPACE_BARRIER_WRITE); \ bus_space_write_stream_8((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset) + 8, (_eop)->eo_u64[1]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_LOCK(_esbp); \ \ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ /* \ * Make sure that previous writes to the oword have \ * been done. It should be cheaper than barrier just \ * after the last write below. \ */ \ bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), sizeof (efx_oword_t), \ BUS_SPACE_BARRIER_WRITE); \ bus_space_write_stream_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset), (_eop)->eo_u32[0]); \ bus_space_write_stream_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset) + 4, (_eop)->eo_u32[1]); \ bus_space_write_stream_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset) + 8, (_eop)->eo_u32[2]); \ /* \ * It should be guaranteed that the last dword comes \ * the last, so barrier entire oword to be sure that \ * neither above nor below writes are reordered. \ */ \ bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), sizeof (efx_oword_t), \ BUS_SPACE_BARRIER_WRITE); \ bus_space_write_stream_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, \ (_offset) + 12, (_eop)->eo_u32[3]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ SFXGE_BAR_UNLOCK(_esbp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif /* Use the standard octo-word write for doorbell writes */ #define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \ do { \ EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* SPIN */ #define EFSYS_SPIN(_us) \ do { \ DELAY(_us); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_SLEEP EFSYS_SPIN /* BARRIERS */ #define EFSYS_MEM_READ_BARRIER() rmb() #define EFSYS_PIO_WRITE_BARRIER() /* DMA SYNC */ #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) \ do { \ bus_dmamap_sync((_esmp)->esm_tag, \ (_esmp)->esm_map, \ BUS_DMASYNC_POSTREAD); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) \ do { \ bus_dmamap_sync((_esmp)->esm_tag, \ (_esmp)->esm_map, \ BUS_DMASYNC_PREWRITE); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* TIMESTAMP */ typedef clock_t efsys_timestamp_t; #define EFSYS_TIMESTAMP(_usp) \ do { \ clock_t now; \ \ now = ticks; \ *(_usp) = now * hz / 1000000; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* KMEM */ #define EFSYS_KMEM_ALLOC(_esip, _size, _p) \ do { \ (_esip) = (_esip); \ /* \ * The macro is used in non-sleepable contexts, for \ * example, holding a mutex. 
\ */ \ (_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_KMEM_FREE(_esip, _size, _p) \ do { \ (void) (_esip); \ (void) (_size); \ free((_p), M_SFXGE); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* LOCK */ typedef struct efsys_lock_s { struct mtx lock; char lock_name[SFXGE_LOCK_NAME_MAX]; } efsys_lock_t; #define SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \ do { \ efsys_lock_t *__eslp = (_eslp); \ \ snprintf((__eslp)->lock_name, \ sizeof((__eslp)->lock_name), \ "%s:%s", (_ifname), (_label)); \ mtx_init(&(__eslp)->lock, (__eslp)->lock_name, \ NULL, MTX_DEF); \ } while (B_FALSE) #define SFXGE_EFSYS_LOCK_DESTROY(_eslp) \ mtx_destroy(&(_eslp)->lock) #define SFXGE_EFSYS_LOCK(_eslp) \ mtx_lock(&(_eslp)->lock) #define SFXGE_EFSYS_UNLOCK(_eslp) \ mtx_unlock(&(_eslp)->lock) #define SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp) \ mtx_assert(&(_eslp)->lock, MA_OWNED) #define EFSYS_LOCK_MAGIC 0x000010c4 #define EFSYS_LOCK(_lockp, _state) \ do { \ SFXGE_EFSYS_LOCK(_lockp); \ (_state) = EFSYS_LOCK_MAGIC; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_UNLOCK(_lockp, _state) \ do { \ if ((_state) != EFSYS_LOCK_MAGIC) \ KASSERT(B_FALSE, ("not locked")); \ SFXGE_EFSYS_UNLOCK(_lockp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* PREEMPT */ #define EFSYS_PREEMPT_DISABLE(_state) \ do { \ (_state) = (_state); \ critical_enter(); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_PREEMPT_ENABLE(_state) \ do { \ (_state) = (_state); \ critical_exit(_state); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* STAT */ typedef uint64_t efsys_stat_t; #define EFSYS_STAT_INCR(_knp, _delta) \ do { \ *(_knp) += (_delta); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_DECR(_knp, _delta) \ do { \ *(_knp) -= (_delta); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_SET(_knp, _val) \ do { \ *(_knp) = (_val); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_SET_QWORD(_knp, _valp) \ do { \ *(_knp) = le64toh((_valp)->eq_u64[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_SET_DWORD(_knp, _valp) \ do { \ *(_knp) = le32toh((_valp)->ed_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_INCR_QWORD(_knp, _valp) \ do { \ *(_knp) += le64toh((_valp)->eq_u64[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \ do { \ *(_knp) -= le64toh((_valp)->eq_u64[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* ERR */ extern void sfxge_err(efsys_identifier_t *, unsigned int, uint32_t, uint32_t); #if EFSYS_OPT_DECODE_INTR_FATAL #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \ do { \ sfxge_err((_esip), (_code), (_dword0), (_dword1)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif /* ASSERT */ #define EFSYS_ASSERT(_exp) do { \ if (!(_exp)) \ panic("%s", #_exp); \ } while (0) #define EFSYS_ASSERT3(_x, _op, _y, _t) do { \ const _t __x = (_t)(_x); \ const _t __y = (_t)(_y); \ if (!(__x _op __y)) \ panic("assertion failed at %s:%u", __FILE__, __LINE__); \ } while(0) #define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t) #define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t) #define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t) /* ROTATE */ #define EFSYS_HAS_ROTL_DWORD 0 #ifdef __cplusplus } #endif #endif /* _SYS_EFSYS_H */ Index: stable/10/sys/dev/sfxge/common/efx.h =================================================================== --- 
stable/10/sys/dev/sfxge/common/efx.h (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx.h (revision 293927) @@ -1,2300 +1,2310 @@ /*- * Copyright (c) 2006-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_EFX_H #define _SYS_EFX_H #include "efsys.h" #include "efx_phy_ids.h" #ifdef __cplusplus extern "C" { #endif -#define EFX_STATIC_ASSERT(_cond) ((void)sizeof(char[(_cond) ? 1 : -1])) +#define EFX_STATIC_ASSERT(_cond) \ + ((void)sizeof(char[(_cond) ? 
1 : -1])) -#define EFX_ARRAY_SIZE(_array) (sizeof(_array) / sizeof((_array)[0])) +#define EFX_ARRAY_SIZE(_array) \ + (sizeof(_array) / sizeof((_array)[0])) -#define EFX_FIELD_OFFSET(_type, _field) ((size_t) &(((_type *)0)->_field)) +#define EFX_FIELD_OFFSET(_type, _field) \ + ((size_t) &(((_type *)0)->_field)) +/* Return codes */ + +typedef __success(return == 0) int efx_rc_t; + + +/* Chip families */ + typedef enum efx_family_e { EFX_FAMILY_INVALID, EFX_FAMILY_FALCON, EFX_FAMILY_SIENA, EFX_FAMILY_HUNTINGTON, EFX_FAMILY_NTYPES } efx_family_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, __out efx_family_t *efp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_infer_family( __in efsys_bar_t *esbp, __out efx_family_t *efp); #define EFX_PCI_VENID_SFC 0x1924 #define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */ #define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */ #define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */ #define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810 #define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901 #define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */ #define EFX_PCI_DEVID_HUNTINGTON 0x0913 /* SFL9122 PF */ #define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */ #define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */ #define EFX_PCI_DEVID_HUNTINGTON_VF 0x1913 /* SFL9122 VF */ #define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */ #define EFX_MEM_BAR 2 /* Error codes */ enum { EFX_ERR_INVALID, EFX_ERR_SRAM_OOB, EFX_ERR_BUFID_DC_OOB, EFX_ERR_MEM_PERR, EFX_ERR_RBUF_OWN, EFX_ERR_TBUF_OWN, EFX_ERR_RDESQ_OWN, EFX_ERR_TDESQ_OWN, EFX_ERR_EVQ_OWN, EFX_ERR_EVFF_OFLO, EFX_ERR_ILL_ADDR, EFX_ERR_SRAM_PERR, EFX_ERR_NCODES }; /* Calculate the IEEE 802.3 CRC32 of a MAC addr */ extern __checkReturn uint32_t efx_crc32_calculate( __in uint32_t crc_init, __in_ecount(length) uint8_t const *input, __in int length); /* Type prototypes */ typedef struct efx_rxq_s efx_rxq_t; /* NIC */ typedef struct efx_nic_s efx_nic_t; #define EFX_NIC_FUNC_PRIMARY 0x00000001 #define EFX_NIC_FUNC_LINKCTRL 0x00000002 #define EFX_NIC_FUNC_TRUSTED 0x00000004 -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_create( __in efx_family_t family, __in efsys_identifier_t *esip, __in efsys_bar_t *esbp, __in efsys_lock_t *eslp, __deref_out efx_nic_t **enpp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_probe( __in efx_nic_t *enp); #if EFSYS_OPT_PCIE_TUNE -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_pcie_tune( __in efx_nic_t *enp, unsigned int nlanes); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_pcie_extended_sync( __in efx_nic_t *enp); #endif /* EFSYS_OPT_PCIE_TUNE */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_init( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_reset( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void efx_nic_fini( __in efx_nic_t *enp); extern void efx_nic_unprobe( __in efx_nic_t *enp); extern void efx_nic_destroy( __in efx_nic_t *enp); #if EFSYS_OPT_MCDI #if EFSYS_OPT_HUNTINGTON /* Huntington requires MCDIv2 commands */ #define WITH_MCDI_V2 1 #endif typedef struct efx_mcdi_req_s efx_mcdi_req_t; typedef enum efx_mcdi_exception_e { EFX_MCDI_EXCEPTION_MC_REBOOT, EFX_MCDI_EXCEPTION_MC_BADASSERT, } efx_mcdi_exception_t; typedef struct efx_mcdi_transport_s { void *emt_context; 
efsys_mem_t *emt_dma_mem; void (*emt_execute)(void *, efx_mcdi_req_t *); void (*emt_ev_cpl)(void *); void (*emt_exception)(void *, efx_mcdi_exception_t); } efx_mcdi_transport_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_reboot( __in efx_nic_t *enp); void efx_mcdi_new_epoch( __in efx_nic_t *enp); extern void efx_mcdi_request_start( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in boolean_t ev_cpl); extern __checkReturn boolean_t efx_mcdi_request_poll( __in efx_nic_t *enp); extern __checkReturn boolean_t efx_mcdi_request_abort( __in efx_nic_t *enp); extern void efx_mcdi_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_MCDI */ /* INTR */ #define EFX_NINTR_FALCON 64 #define EFX_NINTR_SIENA 1024 typedef enum efx_intr_type_e { EFX_INTR_INVALID = 0, EFX_INTR_LINE, EFX_INTR_MESSAGE, EFX_INTR_NTYPES } efx_intr_type_t; #define EFX_INTR_SIZE (sizeof (efx_oword_t)) -extern __checkReturn int +extern __checkReturn efx_rc_t efx_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp); extern void efx_intr_enable( __in efx_nic_t *enp); extern void efx_intr_disable( __in efx_nic_t *enp); extern void efx_intr_disable_unlocked( __in efx_nic_t *enp); #define EFX_INTR_NEVQS 32 -extern __checkReturn int +extern __checkReturn efx_rc_t efx_intr_trigger( __in efx_nic_t *enp, __in unsigned int level); extern void efx_intr_status_line( __in efx_nic_t *enp, __out boolean_t *fatalp, __out uint32_t *maskp); extern void efx_intr_status_message( __in efx_nic_t *enp, __in unsigned int message, __out boolean_t *fatalp); extern void efx_intr_fatal( __in efx_nic_t *enp); extern void efx_intr_fini( __in efx_nic_t *enp); /* MAC */ #if EFSYS_OPT_MAC_STATS /* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */ typedef enum efx_mac_stat_e { EFX_MAC_RX_OCTETS, EFX_MAC_RX_PKTS, EFX_MAC_RX_UNICST_PKTS, EFX_MAC_RX_MULTICST_PKTS, EFX_MAC_RX_BRDCST_PKTS, EFX_MAC_RX_PAUSE_PKTS, EFX_MAC_RX_LE_64_PKTS, EFX_MAC_RX_65_TO_127_PKTS, EFX_MAC_RX_128_TO_255_PKTS, EFX_MAC_RX_256_TO_511_PKTS, EFX_MAC_RX_512_TO_1023_PKTS, EFX_MAC_RX_1024_TO_15XX_PKTS, EFX_MAC_RX_GE_15XX_PKTS, EFX_MAC_RX_ERRORS, EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS, EFX_MAC_RX_FALSE_CARRIER_ERRORS, EFX_MAC_RX_SYMBOL_ERRORS, EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_INTERNAL_ERRORS, EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_LANE0_CHAR_ERR, EFX_MAC_RX_LANE1_CHAR_ERR, EFX_MAC_RX_LANE2_CHAR_ERR, EFX_MAC_RX_LANE3_CHAR_ERR, EFX_MAC_RX_LANE0_DISP_ERR, EFX_MAC_RX_LANE1_DISP_ERR, EFX_MAC_RX_LANE2_DISP_ERR, EFX_MAC_RX_LANE3_DISP_ERR, EFX_MAC_RX_MATCH_FAULT, EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_OCTETS, EFX_MAC_TX_PKTS, EFX_MAC_TX_UNICST_PKTS, EFX_MAC_TX_MULTICST_PKTS, EFX_MAC_TX_BRDCST_PKTS, EFX_MAC_TX_PAUSE_PKTS, EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_65_TO_127_PKTS, EFX_MAC_TX_128_TO_255_PKTS, EFX_MAC_TX_256_TO_511_PKTS, EFX_MAC_TX_512_TO_1023_PKTS, EFX_MAC_TX_1024_TO_15XX_PKTS, EFX_MAC_TX_GE_15XX_PKTS, EFX_MAC_TX_ERRORS, EFX_MAC_TX_SGL_COL_PKTS, EFX_MAC_TX_MULT_COL_PKTS, EFX_MAC_TX_EX_COL_PKTS, EFX_MAC_TX_LATE_COL_PKTS, EFX_MAC_TX_DEF_PKTS, EFX_MAC_TX_EX_DEF_PKTS, EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_PM_DISCARD_BB_OVERFLOW, EFX_MAC_PM_TRUNC_VFIFO_FULL, EFX_MAC_PM_DISCARD_VFIFO_FULL, EFX_MAC_PM_TRUNC_QBB, EFX_MAC_PM_DISCARD_QBB, EFX_MAC_PM_DISCARD_MAPPING, EFX_MAC_RXDP_Q_DISABLED_PKTS, EFX_MAC_RXDP_DI_DROPPED_PKTS, EFX_MAC_RXDP_STREAMING_PKTS, EFX_MAC_RXDP_HLB_FETCH, EFX_MAC_RXDP_HLB_WAIT, 
EFX_MAC_VADAPTER_RX_UNICAST_PACKETS, EFX_MAC_VADAPTER_RX_UNICAST_BYTES, EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS, EFX_MAC_VADAPTER_RX_MULTICAST_BYTES, EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS, EFX_MAC_VADAPTER_RX_BROADCAST_BYTES, EFX_MAC_VADAPTER_RX_BAD_PACKETS, EFX_MAC_VADAPTER_RX_BAD_BYTES, EFX_MAC_VADAPTER_RX_OVERFLOW, EFX_MAC_VADAPTER_TX_UNICAST_PACKETS, EFX_MAC_VADAPTER_TX_UNICAST_BYTES, EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS, EFX_MAC_VADAPTER_TX_MULTICAST_BYTES, EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS, EFX_MAC_VADAPTER_TX_BROADCAST_BYTES, EFX_MAC_VADAPTER_TX_BAD_PACKETS, EFX_MAC_VADAPTER_TX_BAD_BYTES, EFX_MAC_VADAPTER_TX_OVERFLOW, EFX_MAC_NSTATS } efx_mac_stat_t; /* END MKCONFIG GENERATED EfxHeaderMacBlock */ #endif /* EFSYS_OPT_MAC_STATS */ typedef enum efx_link_mode_e { EFX_LINK_UNKNOWN = 0, EFX_LINK_DOWN, EFX_LINK_10HDX, EFX_LINK_10FDX, EFX_LINK_100HDX, EFX_LINK_100FDX, EFX_LINK_1000HDX, EFX_LINK_1000FDX, EFX_LINK_10000FDX, EFX_LINK_40000FDX, EFX_LINK_NMODES } efx_link_mode_t; #define EFX_MAC_ADDR_LEN 6 #define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t*)_address)[0] & 0x01) #define EFX_MAC_MULTICAST_LIST_MAX 256 #define EFX_MAC_SDU_MAX 9202 #define EFX_MAC_PDU(_sdu) \ P2ROUNDUP(((_sdu) \ + /* EtherII */ 14 \ + /* VLAN */ 4 \ + /* CRC */ 4 \ + /* bug16011 */ 16), \ (1 << 3)) #define EFX_MAC_PDU_MIN 60 #define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX) -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_pdu_set( __in efx_nic_t *enp, __in size_t pdu); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_addr_set( __in efx_nic_t *enp, __in uint8_t *addr); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_filter_set( __in efx_nic_t *enp, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_multicast_list_set( __in efx_nic_t *enp, __in_ecount(6*count) uint8_t const *addrs, __in int count); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void efx_mac_filter_default_rxq_clear( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_drain( __in efx_nic_t *enp, __in boolean_t enabled); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); #define EFX_FCNTL_RESPOND 0x00000001 #define EFX_FCNTL_GENERATE 0x00000002 -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_fcntl_set( __in efx_nic_t *enp, __in unsigned int fcntl, __in boolean_t autoneg); extern void efx_mac_fcntl_get( __in efx_nic_t *enp, __out unsigned int *fcntl_wantedp, __out unsigned int *fcntl_linkp); #define EFX_MAC_HASH_BITS (1 << 8) -extern __checkReturn int +extern __checkReturn efx_rc_t efx_pktfilter_init( __in efx_nic_t *enp); extern void efx_pktfilter_fini( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_pktfilter_set( __in efx_nic_t *enp, __in boolean_t unicst, __in boolean_t brdcst); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_hash_set( __in efx_nic_t *enp, __in_ecount(EFX_MAC_HASH_BITS) unsigned int const *bucket); #if EFSYS_OPT_MCAST_FILTER_LIST -extern __checkReturn int +extern __checkReturn efx_rc_t efx_pktfilter_mcast_list_set( __in efx_nic_t *enp, __in uint8_t const *addrs, __in int count); #endif /* EFSYS_OPT_MCAST_FILTER_LIST */ -extern __checkReturn int +extern __checkReturn efx_rc_t 
efx_pktfilter_mcast_all( __in efx_nic_t *enp); #if EFSYS_OPT_MAC_STATS #if EFSYS_OPT_NAMES extern __checkReturn const char * efx_mac_stat_name( __in efx_nic_t *enp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ #define EFX_MAC_STATS_SIZE 0x400 /* * Upload mac statistics supported by the hardware into the given buffer. * * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes, * and page aligned. * * The hardware will only DMA statistics that it understands (of course). * Drivers should not make any assumptions about which statistics are * supported, especially when the statistics are generated by firmware. * * Thus, drivers should zero this buffer before use, so that not-understood * statistics read back as zero. */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period_ms, __in boolean_t events); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ /* MON */ typedef enum efx_mon_type_e { EFX_MON_INVALID = 0, EFX_MON_NULL, EFX_MON_LM87, EFX_MON_MAX6647, EFX_MON_SFC90X0, EFX_MON_SFC91X0, EFX_MON_NTYPES } efx_mon_type_t; #if EFSYS_OPT_NAMES extern const char * efx_mon_name( __in efx_nic_t *enp); #endif /* EFSYS_OPT_NAMES */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mon_init( __in efx_nic_t *enp); #if EFSYS_OPT_MON_STATS #define EFX_MON_STATS_PAGE_SIZE 0x100 #define EFX_MON_MASK_ELEMENT_SIZE 32 /* START MKCONFIG GENERATED MonitorHeaderStatsBlock c79c86b62a144846 */ typedef enum efx_mon_stat_e { EFX_MON_STAT_2_5V, EFX_MON_STAT_VCCP1, EFX_MON_STAT_VCC, EFX_MON_STAT_5V, EFX_MON_STAT_12V, EFX_MON_STAT_VCCP2, EFX_MON_STAT_EXT_TEMP, EFX_MON_STAT_INT_TEMP, EFX_MON_STAT_AIN1, EFX_MON_STAT_AIN2, EFX_MON_STAT_INT_COOLING, EFX_MON_STAT_EXT_COOLING, EFX_MON_STAT_1V, EFX_MON_STAT_1_2V, EFX_MON_STAT_1_8V, EFX_MON_STAT_3_3V, EFX_MON_STAT_1_2VA, EFX_MON_STAT_VREF, EFX_MON_STAT_VAOE, EFX_MON_STAT_AOE_TEMP, EFX_MON_STAT_PSU_AOE_TEMP, EFX_MON_STAT_PSU_TEMP, EFX_MON_STAT_FAN0, EFX_MON_STAT_FAN1, EFX_MON_STAT_FAN2, EFX_MON_STAT_FAN3, EFX_MON_STAT_FAN4, EFX_MON_STAT_VAOE_IN, EFX_MON_STAT_IAOE, EFX_MON_STAT_IAOE_IN, EFX_MON_STAT_NIC_POWER, EFX_MON_STAT_0_9V, EFX_MON_STAT_I0_9V, EFX_MON_STAT_I1_2V, EFX_MON_STAT_0_9V_ADC, EFX_MON_STAT_INT_TEMP2, EFX_MON_STAT_VREG_TEMP, EFX_MON_STAT_VREG_0_9V_TEMP, EFX_MON_STAT_VREG_1_2V_TEMP, EFX_MON_STAT_INT_VPTAT, EFX_MON_STAT_INT_ADC_TEMP, EFX_MON_STAT_EXT_VPTAT, EFX_MON_STAT_EXT_ADC_TEMP, EFX_MON_STAT_AMBIENT_TEMP, EFX_MON_STAT_AIRFLOW, EFX_MON_STAT_VDD08D_VSS08D_CSR, EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC, EFX_MON_STAT_HOTPOINT_TEMP, EFX_MON_STAT_PHY_POWER_SWITCH_PORT0, EFX_MON_STAT_PHY_POWER_SWITCH_PORT1, EFX_MON_STAT_MUM_VCC, EFX_MON_STAT_0V9_A, EFX_MON_STAT_I0V9_A, EFX_MON_STAT_0V9_A_TEMP, EFX_MON_STAT_0V9_B, EFX_MON_STAT_I0V9_B, EFX_MON_STAT_0V9_B_TEMP, EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY, EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC, EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY, EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC, EFX_MON_STAT_CONTROLLER_MASTER_VPTAT, EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP, EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC, EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC, EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT, 
EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP, EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC, EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC, EFX_MON_NSTATS } efx_mon_stat_t; /* END MKCONFIG GENERATED MonitorHeaderStatsBlock */ typedef enum efx_mon_stat_state_e { EFX_MON_STAT_STATE_OK = 0, EFX_MON_STAT_STATE_WARNING = 1, EFX_MON_STAT_STATE_FATAL = 2, EFX_MON_STAT_STATE_BROKEN = 3, EFX_MON_STAT_STATE_NO_READING = 4, } efx_mon_stat_state_t; typedef struct efx_mon_stat_value_s { uint16_t emsv_value; uint16_t emsv_state; } efx_mon_stat_value_t; #if EFSYS_OPT_NAMES extern const char * efx_mon_stat_name( __in efx_nic_t *enp, __in efx_mon_stat_t id); #endif /* EFSYS_OPT_NAMES */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mon_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values); #endif /* EFSYS_OPT_MON_STATS */ extern void efx_mon_fini( __in efx_nic_t *enp); /* PHY */ #define PMA_PMD_MMD 1 #define PCS_MMD 3 #define PHY_XS_MMD 4 #define DTE_XS_MMD 5 #define AN_MMD 7 #define CL22EXT_MMD 29 #define MAXMMD ((1 << 5) - 1) -extern __checkReturn int +extern __checkReturn efx_rc_t efx_phy_verify( __in efx_nic_t *enp); #if EFSYS_OPT_PHY_LED_CONTROL typedef enum efx_phy_led_mode_e { EFX_PHY_LED_DEFAULT = 0, EFX_PHY_LED_OFF, EFX_PHY_LED_ON, EFX_PHY_LED_FLASH, EFX_PHY_LED_NMODES } efx_phy_led_mode_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_phy_led_set( __in efx_nic_t *enp, __in efx_phy_led_mode_t mode); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_port_init( __in efx_nic_t *enp); #if EFSYS_OPT_LOOPBACK typedef enum efx_loopback_type_e { EFX_LOOPBACK_OFF = 0, EFX_LOOPBACK_DATA = 1, EFX_LOOPBACK_GMAC = 2, EFX_LOOPBACK_XGMII = 3, EFX_LOOPBACK_XGXS = 4, EFX_LOOPBACK_XAUI = 5, EFX_LOOPBACK_GMII = 6, EFX_LOOPBACK_SGMII = 7, EFX_LOOPBACK_XGBR = 8, EFX_LOOPBACK_XFI = 9, EFX_LOOPBACK_XAUI_FAR = 10, EFX_LOOPBACK_GMII_FAR = 11, EFX_LOOPBACK_SGMII_FAR = 12, EFX_LOOPBACK_XFI_FAR = 13, EFX_LOOPBACK_GPHY = 14, EFX_LOOPBACK_PHY_XS = 15, EFX_LOOPBACK_PCS = 16, EFX_LOOPBACK_PMA_PMD = 17, EFX_LOOPBACK_XPORT = 18, EFX_LOOPBACK_XGMII_WS = 19, EFX_LOOPBACK_XAUI_WS = 20, EFX_LOOPBACK_XAUI_WS_FAR = 21, EFX_LOOPBACK_XAUI_WS_NEAR = 22, EFX_LOOPBACK_GMII_WS = 23, EFX_LOOPBACK_XFI_WS = 24, EFX_LOOPBACK_XFI_WS_FAR = 25, EFX_LOOPBACK_PHYXS_WS = 26, EFX_LOOPBACK_PMA_INT = 27, EFX_LOOPBACK_SD_NEAR = 28, EFX_LOOPBACK_SD_FAR = 29, EFX_LOOPBACK_PMA_INT_WS = 30, EFX_LOOPBACK_SD_FEP2_WS = 31, EFX_LOOPBACK_SD_FEP1_5_WS = 32, EFX_LOOPBACK_SD_FEP_WS = 33, EFX_LOOPBACK_SD_FES_WS = 34, EFX_LOOPBACK_NTYPES } efx_loopback_type_t; typedef enum efx_loopback_kind_e { EFX_LOOPBACK_KIND_OFF = 0, EFX_LOOPBACK_KIND_ALL, EFX_LOOPBACK_KIND_MAC, EFX_LOOPBACK_KIND_PHY, EFX_LOOPBACK_NKINDS } efx_loopback_kind_t; extern void efx_loopback_mask( __in efx_loopback_kind_t loopback_kind, __out efx_qword_t *maskp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_port_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t type); #if EFSYS_OPT_NAMES extern __checkReturn const char * efx_loopback_type_name( __in efx_nic_t *enp, __in efx_loopback_type_t type); #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_LOOPBACK */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_port_poll( __in efx_nic_t *enp, __out_opt efx_link_mode_t *link_modep); extern void efx_port_fini( __in efx_nic_t *enp); typedef enum efx_phy_cap_type_e { EFX_PHY_CAP_INVALID = 0, 
EFX_PHY_CAP_10HDX, EFX_PHY_CAP_10FDX, EFX_PHY_CAP_100HDX, EFX_PHY_CAP_100FDX, EFX_PHY_CAP_1000HDX, EFX_PHY_CAP_1000FDX, EFX_PHY_CAP_10000FDX, EFX_PHY_CAP_PAUSE, EFX_PHY_CAP_ASYM, EFX_PHY_CAP_AN, EFX_PHY_CAP_40000FDX, EFX_PHY_CAP_NTYPES } efx_phy_cap_type_t; #define EFX_PHY_CAP_CURRENT 0x00000000 #define EFX_PHY_CAP_DEFAULT 0x00000001 #define EFX_PHY_CAP_PERM 0x00000002 extern void efx_phy_adv_cap_get( __in efx_nic_t *enp, __in uint32_t flag, __out uint32_t *maskp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_phy_adv_cap_set( __in efx_nic_t *enp, __in uint32_t mask); extern void efx_phy_lp_cap_get( __in efx_nic_t *enp, __out uint32_t *maskp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); typedef enum efx_phy_media_type_e { EFX_PHY_MEDIA_INVALID = 0, EFX_PHY_MEDIA_XAUI, EFX_PHY_MEDIA_CX4, EFX_PHY_MEDIA_KX4, EFX_PHY_MEDIA_XFP, EFX_PHY_MEDIA_SFP_PLUS, EFX_PHY_MEDIA_BASE_T, EFX_PHY_MEDIA_QSFP_PLUS, EFX_PHY_MEDIA_NTYPES } efx_phy_media_type_t; /* Get the type of medium currently used. If the board has ports for * modules, a module is present, and we recognise the media type of * the module, then this will be the media type of the module. * Otherwise it will be the media type of the port. */ extern void efx_phy_media_type_get( __in efx_nic_t *enp, __out efx_phy_media_type_t *typep); #if EFSYS_OPT_PHY_STATS /* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */ typedef enum efx_phy_stat_e { EFX_PHY_STAT_OUI, EFX_PHY_STAT_PMA_PMD_LINK_UP, EFX_PHY_STAT_PMA_PMD_RX_FAULT, EFX_PHY_STAT_PMA_PMD_TX_FAULT, EFX_PHY_STAT_PMA_PMD_REV_A, EFX_PHY_STAT_PMA_PMD_REV_B, EFX_PHY_STAT_PMA_PMD_REV_C, EFX_PHY_STAT_PMA_PMD_REV_D, EFX_PHY_STAT_PCS_LINK_UP, EFX_PHY_STAT_PCS_RX_FAULT, EFX_PHY_STAT_PCS_TX_FAULT, EFX_PHY_STAT_PCS_BER, EFX_PHY_STAT_PCS_BLOCK_ERRORS, EFX_PHY_STAT_PHY_XS_LINK_UP, EFX_PHY_STAT_PHY_XS_RX_FAULT, EFX_PHY_STAT_PHY_XS_TX_FAULT, EFX_PHY_STAT_PHY_XS_ALIGN, EFX_PHY_STAT_PHY_XS_SYNC_A, EFX_PHY_STAT_PHY_XS_SYNC_B, EFX_PHY_STAT_PHY_XS_SYNC_C, EFX_PHY_STAT_PHY_XS_SYNC_D, EFX_PHY_STAT_AN_LINK_UP, EFX_PHY_STAT_AN_MASTER, EFX_PHY_STAT_AN_LOCAL_RX_OK, EFX_PHY_STAT_AN_REMOTE_RX_OK, EFX_PHY_STAT_CL22EXT_LINK_UP, EFX_PHY_STAT_SNR_A, EFX_PHY_STAT_SNR_B, EFX_PHY_STAT_SNR_C, EFX_PHY_STAT_SNR_D, EFX_PHY_STAT_PMA_PMD_SIGNAL_A, EFX_PHY_STAT_PMA_PMD_SIGNAL_B, EFX_PHY_STAT_PMA_PMD_SIGNAL_C, EFX_PHY_STAT_PMA_PMD_SIGNAL_D, EFX_PHY_STAT_AN_COMPLETE, EFX_PHY_STAT_PMA_PMD_REV_MAJOR, EFX_PHY_STAT_PMA_PMD_REV_MINOR, EFX_PHY_STAT_PMA_PMD_REV_MICRO, EFX_PHY_STAT_PCS_FW_VERSION_0, EFX_PHY_STAT_PCS_FW_VERSION_1, EFX_PHY_STAT_PCS_FW_VERSION_2, EFX_PHY_STAT_PCS_FW_VERSION_3, EFX_PHY_STAT_PCS_FW_BUILD_YY, EFX_PHY_STAT_PCS_FW_BUILD_MM, EFX_PHY_STAT_PCS_FW_BUILD_DD, EFX_PHY_STAT_PCS_OP_MODE, EFX_PHY_NSTATS } efx_phy_stat_t; /* END MKCONFIG GENERATED PhyHeaderStatsBlock */ #if EFSYS_OPT_NAMES extern const char * efx_phy_stat_name( __in efx_nic_t *enp, __in efx_phy_stat_t stat); #endif /* EFSYS_OPT_NAMES */ #define EFX_PHY_STATS_SIZE 0x100 -extern __checkReturn int +extern __checkReturn efx_rc_t efx_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES extern const char * efx_phy_prop_name( __in efx_nic_t *enp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ #define EFX_PHY_PROP_DEFAULT 0x00000001 -extern __checkReturn int +extern __checkReturn efx_rc_t efx_phy_prop_get( __in efx_nic_t *enp, __in 
unsigned int id, __in uint32_t flags, __out uint32_t *valp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_phy_prop_set( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t val); #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST typedef enum efx_bist_type_e { EFX_BIST_TYPE_UNKNOWN, EFX_BIST_TYPE_PHY_NORMAL, EFX_BIST_TYPE_PHY_CABLE_SHORT, EFX_BIST_TYPE_PHY_CABLE_LONG, EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */ EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus*/ EFX_BIST_TYPE_REG, /* Test the register memories */ EFX_BIST_TYPE_NTYPES, } efx_bist_type_t; typedef enum efx_bist_result_e { EFX_BIST_RESULT_UNKNOWN, EFX_BIST_RESULT_RUNNING, EFX_BIST_RESULT_PASSED, EFX_BIST_RESULT_FAILED, } efx_bist_result_t; typedef enum efx_phy_cable_status_e { EFX_PHY_CABLE_STATUS_OK, EFX_PHY_CABLE_STATUS_INVALID, EFX_PHY_CABLE_STATUS_OPEN, EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT, EFX_PHY_CABLE_STATUS_INTERPAIRSHORT, EFX_PHY_CABLE_STATUS_BUSY, } efx_phy_cable_status_t; typedef enum efx_bist_value_e { EFX_BIST_PHY_CABLE_LENGTH_A, EFX_BIST_PHY_CABLE_LENGTH_B, EFX_BIST_PHY_CABLE_LENGTH_C, EFX_BIST_PHY_CABLE_LENGTH_D, EFX_BIST_PHY_CABLE_STATUS_A, EFX_BIST_PHY_CABLE_STATUS_B, EFX_BIST_PHY_CABLE_STATUS_C, EFX_BIST_PHY_CABLE_STATUS_D, EFX_BIST_FAULT_CODE, /* Memory BIST specific values. These match to the MC_CMD_BIST_POLL * response. */ EFX_BIST_MEM_TEST, EFX_BIST_MEM_ADDR, EFX_BIST_MEM_BUS, EFX_BIST_MEM_EXPECT, EFX_BIST_MEM_ACTUAL, EFX_BIST_MEM_ECC, EFX_BIST_MEM_ECC_PARITY, EFX_BIST_MEM_ECC_FATAL, EFX_BIST_NVALUES, } efx_bist_value_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_bist_enable_offline( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt uint32_t *value_maskp, __out_ecount_opt(count) unsigned long *valuesp, __in size_t count); extern void efx_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ #define EFX_FEATURE_IPV6 0x00000001 #define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002 #define EFX_FEATURE_LINK_EVENTS 0x00000004 #define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008 #define EFX_FEATURE_WOL 0x00000010 #define EFX_FEATURE_MCDI 0x00000020 #define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040 #define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080 #define EFX_FEATURE_TURBO 0x00000100 #define EFX_FEATURE_MCDI_DMA 0x00000200 #define EFX_FEATURE_TX_SRC_FILTERS 0x00000400 #define EFX_FEATURE_PIO_BUFFERS 0x00000800 #define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000 typedef struct efx_nic_cfg_s { uint32_t enc_board_type; uint32_t enc_phy_type; #if EFSYS_OPT_NAMES char enc_phy_name[21]; #endif char enc_phy_revision[21]; efx_mon_type_t enc_mon_type; #if EFSYS_OPT_MON_STATS uint32_t enc_mon_stat_dma_buf_size; uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32]; #endif unsigned int enc_features; uint8_t enc_mac_addr[6]; uint8_t enc_port; /* PHY port number */ uint32_t enc_func_flags; uint32_t enc_intr_vec_base; uint32_t enc_intr_limit; uint32_t enc_evq_limit; uint32_t enc_txq_limit; uint32_t enc_rxq_limit; uint32_t enc_buftbl_limit; uint32_t enc_piobuf_limit; uint32_t enc_piobuf_size; uint32_t enc_evq_timer_quantum_ns; uint32_t enc_evq_timer_max_us; uint32_t enc_clk_mult; uint32_t enc_rx_prefix_size; uint32_t enc_rx_buf_align_start; uint32_t enc_rx_buf_align_end; #if 
EFSYS_OPT_LOOPBACK efx_qword_t enc_loopback_types[EFX_LINK_NMODES]; #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_PHY_FLAGS uint32_t enc_phy_flags_mask; #endif /* EFSYS_OPT_PHY_FLAGS */ #if EFSYS_OPT_PHY_LED_CONTROL uint32_t enc_led_mask; #endif /* EFSYS_OPT_PHY_LED_CONTROL */ #if EFSYS_OPT_PHY_STATS uint64_t enc_phy_stat_mask; #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS unsigned int enc_phy_nprops; #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_SIENA uint8_t enc_mcdi_mdio_channel; #if EFSYS_OPT_PHY_STATS uint32_t enc_mcdi_phy_stat_mask; #endif /* EFSYS_OPT_PHY_STATS */ #endif /* EFSYS_OPT_SIENA */ #if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON) #if EFSYS_OPT_MON_STATS uint32_t *enc_mcdi_sensor_maskp; uint32_t enc_mcdi_sensor_mask_size; #endif /* EFSYS_OPT_MON_STATS */ #endif /* (EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON) */ #if EFSYS_OPT_BIST uint32_t enc_bist_mask; #endif /* EFSYS_OPT_BIST */ #if EFSYS_OPT_HUNTINGTON uint32_t enc_pf; uint32_t enc_vf; uint32_t enc_privilege_mask; #endif /* EFSYS_OPT_HUNTINGTON */ boolean_t enc_bug26807_workaround; boolean_t enc_bug35388_workaround; boolean_t enc_bug41750_workaround; boolean_t enc_rx_batching_enabled; /* Maximum number of descriptors completed in an rx event. */ uint32_t enc_rx_batch_max; /* Number of rx descriptors the hardware requires for a push. */ uint32_t enc_rx_push_align; /* * Maximum number of bytes into the packet the TCP header can start for * the hardware to apply TSO packet edits. */ uint32_t enc_tx_tso_tcp_header_offset_limit; boolean_t enc_fw_assisted_tso_enabled; boolean_t enc_hw_tx_insert_vlan_enabled; /* Datapath firmware vadapter/vport/vswitch support */ boolean_t enc_datapath_cap_evb; /* External port identifier */ uint8_t enc_external_port; } efx_nic_cfg_t; #define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff) #define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff) #define EFX_PCI_FUNCTION(_encp) \ (EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf) #define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf) extern const efx_nic_cfg_t * efx_nic_cfg_get( __in efx_nic_t *enp); /* Driver resource limits (minimum required/maximum usable). 
*/ typedef struct efx_drv_limits_s { uint32_t edl_min_evq_count; uint32_t edl_max_evq_count; uint32_t edl_min_rxq_count; uint32_t edl_max_rxq_count; uint32_t edl_min_txq_count; uint32_t edl_max_txq_count; /* PIO blocks (sub-allocated from piobuf) */ uint32_t edl_min_pio_alloc_size; uint32_t edl_max_pio_alloc_count; } efx_drv_limits_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp); typedef enum efx_nic_region_e { EFX_REGION_VI, /* Memory BAR UC mapping */ EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */ } efx_nic_region_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *evq_countp, __out uint32_t *rxq_countp, __out uint32_t *txq_countp); #if EFSYS_OPT_VPD typedef enum efx_vpd_tag_e { EFX_VPD_ID = 0x02, EFX_VPD_END = 0x0f, EFX_VPD_RO = 0x10, EFX_VPD_RW = 0x11, } efx_vpd_tag_t; typedef uint16_t efx_vpd_keyword_t; typedef struct efx_vpd_value_s { efx_vpd_tag_t evv_tag; efx_vpd_keyword_t evv_keyword; uint8_t evv_length; uint8_t evv_value[0x100]; } efx_vpd_value_t; #define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8)) -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_init( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_set( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_next( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void efx_vpd_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_VPD */ /* NVRAM */ #if EFSYS_OPT_NVRAM typedef enum efx_nvram_type_e { EFX_NVRAM_INVALID = 0, EFX_NVRAM_BOOTROM, EFX_NVRAM_BOOTROM_CFG, EFX_NVRAM_MC_FIRMWARE, EFX_NVRAM_MC_GOLDEN, EFX_NVRAM_PHY, EFX_NVRAM_NULLPHY, EFX_NVRAM_FPGA, EFX_NVRAM_FCFW, EFX_NVRAM_CPLD, EFX_NVRAM_FPGA_BACKUP, EFX_NVRAM_DYNAMIC_CFG, EFX_NVRAM_NTYPES, } efx_nvram_type_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_size( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t 
efx_nvram_rw_start( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out_opt size_t *pref_chunkp); extern void efx_nvram_rw_finish( __in efx_nic_t *enp, __in efx_nvram_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_get_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_read_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_set_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_ecount(4) uint16_t version[4]); /* Validate contents of TLV formatted partition */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_tlv_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_erase( __in efx_nic_t *enp, __in efx_nvram_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nvram_write_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __in_bcount(size) caddr_t data, __in size_t size); extern void efx_nvram_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_BOOTCFG -extern int +extern efx_rc_t efx_bootcfg_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); -extern int +extern efx_rc_t efx_bootcfg_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); #endif /* EFSYS_OPT_BOOTCFG */ #if EFSYS_OPT_WOL typedef enum efx_wol_type_e { EFX_WOL_TYPE_INVALID, EFX_WOL_TYPE_MAGIC, EFX_WOL_TYPE_BITMAP, EFX_WOL_TYPE_LINK, EFX_WOL_NTYPES, } efx_wol_type_t; typedef enum efx_lightsout_offload_type_e { EFX_LIGHTSOUT_OFFLOAD_TYPE_INVALID, EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP, EFX_LIGHTSOUT_OFFLOAD_TYPE_NS, } efx_lightsout_offload_type_t; #define EFX_WOL_BITMAP_MASK_SIZE (48) #define EFX_WOL_BITMAP_VALUE_SIZE (128) typedef union efx_wol_param_u { struct { uint8_t mac_addr[6]; } ewp_magic; struct { uint8_t mask[EFX_WOL_BITMAP_MASK_SIZE]; /* 1 bit per byte */ uint8_t value[EFX_WOL_BITMAP_VALUE_SIZE]; /* value to match */ uint8_t value_len; } ewp_bitmap; } efx_wol_param_t; typedef union efx_lightsout_offload_param_u { struct { uint8_t mac_addr[6]; uint32_t ip; } elop_arp; struct { uint8_t mac_addr[6]; uint32_t solicited_node[4]; uint32_t ip[4]; } elop_ns; } efx_lightsout_offload_param_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_wol_init( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_wol_filter_clear( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_wol_filter_add( __in efx_nic_t *enp, __in efx_wol_type_t type, __in efx_wol_param_t *paramp, __out uint32_t *filter_idp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_wol_filter_remove( __in efx_nic_t *enp, __in uint32_t filter_id); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_lightsout_offload_add( __in efx_nic_t *enp, __in efx_lightsout_offload_type_t type, __in efx_lightsout_offload_param_t *paramp, __out uint32_t *filter_idp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_lightsout_offload_remove( __in efx_nic_t *enp, __in efx_lightsout_offload_type_t type, __in uint32_t filter_id); extern void efx_wol_fini( __in efx_nic_t *enp); #endif /* 
EFSYS_OPT_WOL */ #if EFSYS_OPT_DIAG typedef enum efx_pattern_type_t { EFX_PATTERN_BYTE_INCREMENT = 0, EFX_PATTERN_ALL_THE_SAME, EFX_PATTERN_BIT_ALTERNATE, EFX_PATTERN_BYTE_ALTERNATE, EFX_PATTERN_BYTE_CHANGING, EFX_PATTERN_BIT_SWEEP, EFX_PATTERN_NTYPES } efx_pattern_type_t; typedef void (*efx_sram_pattern_fn_t)( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_sram_test( __in efx_nic_t *enp, __in efx_pattern_type_t type); #endif /* EFSYS_OPT_DIAG */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_sram_buf_tbl_set( __in efx_nic_t *enp, __in uint32_t id, __in efsys_mem_t *esmp, __in size_t n); extern void efx_sram_buf_tbl_clear( __in efx_nic_t *enp, __in uint32_t id, __in size_t n); #define EFX_BUF_TBL_SIZE 0x20000 #define EFX_BUF_SIZE 4096 /* EV */ typedef struct efx_evq_s efx_evq_t; #if EFSYS_OPT_QSTATS /* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 6f3843f5fe7cc843 */ typedef enum efx_ev_qstat_e { EV_ALL, EV_RX, EV_RX_OK, EV_RX_FRM_TRUNC, EV_RX_TOBE_DISC, EV_RX_PAUSE_FRM_ERR, EV_RX_BUF_OWNER_ID_ERR, EV_RX_IPV4_HDR_CHKSUM_ERR, EV_RX_TCP_UDP_CHKSUM_ERR, EV_RX_ETH_CRC_ERR, EV_RX_IP_FRAG_ERR, EV_RX_MCAST_PKT, EV_RX_MCAST_HASH_MATCH, EV_RX_TCP_IPV4, EV_RX_TCP_IPV6, EV_RX_UDP_IPV4, EV_RX_UDP_IPV6, EV_RX_OTHER_IPV4, EV_RX_OTHER_IPV6, EV_RX_NON_IP, EV_RX_BATCH, EV_TX, EV_TX_WQ_FF_FULL, EV_TX_PKT_ERR, EV_TX_PKT_TOO_BIG, EV_TX_UNEXPECTED, EV_GLOBAL, EV_GLOBAL_MNT, EV_DRIVER, EV_DRIVER_SRM_UPD_DONE, EV_DRIVER_TX_DESCQ_FLS_DONE, EV_DRIVER_RX_DESCQ_FLS_DONE, EV_DRIVER_RX_DESCQ_FLS_FAILED, EV_DRIVER_RX_DSC_ERROR, EV_DRIVER_TX_DSC_ERROR, EV_DRV_GEN, EV_MCDI_RESPONSE, EV_NQSTATS } efx_ev_qstat_t; /* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */ #endif /* EFSYS_OPT_QSTATS */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_ev_init( __in efx_nic_t *enp); extern void efx_ev_fini( __in efx_nic_t *enp); #define EFX_EVQ_MAXNEVS 32768 #define EFX_EVQ_MINNEVS 512 #define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t)) #define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE) -extern __checkReturn int +extern __checkReturn efx_rc_t efx_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __deref_out efx_evq_t **eepp); extern void efx_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); typedef __checkReturn boolean_t (*efx_initialized_ev_t)( __in_opt void *arg); #define EFX_PKT_UNICAST 0x0004 #define EFX_PKT_START 0x0008 #define EFX_PKT_VLAN_TAGGED 0x0010 #define EFX_CKSUM_TCPUDP 0x0020 #define EFX_CKSUM_IPV4 0x0040 #define EFX_PKT_CONT 0x0080 #define EFX_CHECK_VLAN 0x0100 #define EFX_PKT_TCP 0x0200 #define EFX_PKT_UDP 0x0400 #define EFX_PKT_IPV4 0x0800 #define EFX_PKT_IPV6 0x1000 #define EFX_PKT_PREFIX_LEN 0x2000 #define EFX_ADDR_MISMATCH 0x4000 #define EFX_DISCARD 0x8000 #define EFX_EV_RX_NLABELS 32 #define EFX_EV_TX_NLABELS 32 typedef __checkReturn boolean_t (*efx_rx_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t id, __in uint32_t size, __in uint16_t flags); typedef __checkReturn boolean_t (*efx_tx_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t id); #define EFX_EXCEPTION_RX_RECOVERY 0x00000001 #define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002 #define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003 #define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004 #define EFX_EXCEPTION_FWALERT_SRAM 0x00000005 #define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006 #define EFX_EXCEPTION_RX_ERROR 0x00000007 #define EFX_EXCEPTION_TX_ERROR 
0x00000008 #define EFX_EXCEPTION_EV_ERROR 0x00000009 typedef __checkReturn boolean_t (*efx_exception_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t data); typedef __checkReturn boolean_t (*efx_rxq_flush_done_ev_t)( __in_opt void *arg, __in uint32_t rxq_index); typedef __checkReturn boolean_t (*efx_rxq_flush_failed_ev_t)( __in_opt void *arg, __in uint32_t rxq_index); typedef __checkReturn boolean_t (*efx_txq_flush_done_ev_t)( __in_opt void *arg, __in uint32_t txq_index); typedef __checkReturn boolean_t (*efx_software_ev_t)( __in_opt void *arg, __in uint16_t magic); typedef __checkReturn boolean_t (*efx_sram_ev_t)( __in_opt void *arg, __in uint32_t code); #define EFX_SRAM_CLEAR 0 #define EFX_SRAM_UPDATE 1 #define EFX_SRAM_ILLEGAL_CLEAR 2 typedef __checkReturn boolean_t (*efx_wake_up_ev_t)( __in_opt void *arg, __in uint32_t label); typedef __checkReturn boolean_t (*efx_timer_ev_t)( __in_opt void *arg, __in uint32_t label); typedef __checkReturn boolean_t (*efx_link_change_ev_t)( __in_opt void *arg, __in efx_link_mode_t link_mode); #if EFSYS_OPT_MON_STATS typedef __checkReturn boolean_t (*efx_monitor_ev_t)( __in_opt void *arg, __in efx_mon_stat_t id, __in efx_mon_stat_value_t value); #endif /* EFSYS_OPT_MON_STATS */ #if EFSYS_OPT_MAC_STATS typedef __checkReturn boolean_t (*efx_mac_stats_ev_t)( __in_opt void *arg, __in uint32_t generation ); #endif /* EFSYS_OPT_MAC_STATS */ typedef struct efx_ev_callbacks_s { efx_initialized_ev_t eec_initialized; efx_rx_ev_t eec_rx; efx_tx_ev_t eec_tx; efx_exception_ev_t eec_exception; efx_rxq_flush_done_ev_t eec_rxq_flush_done; efx_rxq_flush_failed_ev_t eec_rxq_flush_failed; efx_txq_flush_done_ev_t eec_txq_flush_done; efx_software_ev_t eec_software; efx_sram_ev_t eec_sram; efx_wake_up_ev_t eec_wake_up; efx_timer_ev_t eec_timer; efx_link_change_ev_t eec_link_change; #if EFSYS_OPT_MON_STATS efx_monitor_ev_t eec_monitor; #endif /* EFSYS_OPT_MON_STATS */ #if EFSYS_OPT_MAC_STATS efx_mac_stats_ev_t eec_mac_stats; #endif /* EFSYS_OPT_MAC_STATS */ } efx_ev_callbacks_t; extern __checkReturn boolean_t efx_ev_qpending( __in efx_evq_t *eep, __in unsigned int count); #if EFSYS_OPT_EV_PREFETCH extern void efx_ev_qprefetch( __in efx_evq_t *eep, __in unsigned int count); #endif /* EFSYS_OPT_EV_PREFETCH */ extern void efx_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES extern const char * efx_ev_qstat_name( __in efx_nic_t *enp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ extern void efx_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ extern void efx_ev_qdestroy( __in efx_evq_t *eep); /* RX */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_rx_init( __inout efx_nic_t *enp); extern void efx_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_RX_HDR_SPLIT - __checkReturn int + __checkReturn efx_rc_t efx_rx_hdr_split_enable( __in efx_nic_t *enp, __in unsigned int hdr_buf_size, __in unsigned int pld_buf_size); #endif /* EFSYS_OPT_RX_HDR_SPLIT */ #if EFSYS_OPT_RX_SCATTER - __checkReturn int + __checkReturn efx_rc_t efx_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE 
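/*
 * Receive side scaling (RSS) interface. The fragment below is purely an
 * illustrative sketch, not part of the original API documentation: the
 * four-queue spread, the 40-byte key length and the error handling are
 * assumptions a real driver would replace with its own policy.
 *
 *	uint8_t rss_key[40];			(filled with random bytes)
 *	unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
 *	unsigned int i;
 *	efx_rc_t rc;
 *
 *	for (i = 0; i < EFX_RSS_TBL_SIZE; i++)
 *		rss_tbl[i] = i % 4;
 *	if ((rc = efx_rx_scale_key_set(enp, rss_key, sizeof (rss_key))) != 0 ||
 *	    (rc = efx_rx_scale_tbl_set(enp, rss_tbl, EFX_RSS_TBL_SIZE)) != 0)
 *		goto fail;
 *
 * The hash algorithm and hash types are selected separately with
 * efx_rx_scale_mode_set(), and efx_rx_scale_support_get() should be
 * consulted first to check whether the key/indirection table are writable
 * (EFX_RX_SCALE_EXCLUSIVE) at all.
 */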
typedef enum efx_rx_hash_alg_e { EFX_RX_HASHALG_LFSR = 0, EFX_RX_HASHALG_TOEPLITZ } efx_rx_hash_alg_t; typedef enum efx_rx_hash_type_e { EFX_RX_HASH_IPV4 = 0, EFX_RX_HASH_TCPIPV4, EFX_RX_HASH_IPV6, EFX_RX_HASH_TCPIPV6, } efx_rx_hash_type_t; typedef enum efx_rx_hash_support_e { EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */ EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */ } efx_rx_hash_support_t; #define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */ #define EFX_MAXRSS 64 /* RX indirection entry range */ #define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */ typedef enum efx_rx_scale_support_e { EFX_RX_SCALE_UNAVAILABLE = 0, /* Not supported */ EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */ EFX_RX_SCALE_SHARED /* Read-only key/indirection table */ } efx_rx_scale_support_t; - extern __checkReturn int +extern __checkReturn efx_rc_t efx_rx_hash_support_get( __in efx_nic_t *enp, __out efx_rx_hash_support_t *supportp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_rx_scale_support_get( __in efx_nic_t *enp, __out efx_rx_scale_support_t *supportp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n); extern uint32_t efx_psuedo_hdr_hash_get( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer); #endif /* EFSYS_OPT_RX_SCALE */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_psuedo_hdr_pkt_length_get( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *pkt_lengthp); #define EFX_RXQ_MAXNDESCS 4096 #define EFX_RXQ_MINNDESCS 512 #define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t)) #define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE) #define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16) #define EFX_RXQ_DC_NDESCS(_dcsize) (8 << _dcsize) typedef enum efx_rxq_type_e { EFX_RXQ_TYPE_DEFAULT, EFX_RXQ_TYPE_SPLIT_HEADER, EFX_RXQ_TYPE_SPLIT_PAYLOAD, EFX_RXQ_TYPE_SCATTER, EFX_RXQ_NTYPES } efx_rxq_type_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp); typedef struct efx_buffer_s { efsys_dma_addr_t eb_addr; size_t eb_size; boolean_t eb_eop; } efx_buffer_t; typedef struct efx_desc_s { efx_qword_t ed_eq; } efx_desc_t; extern void efx_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added); extern void efx_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_rx_qflush( __in efx_rxq_t *erp); extern void efx_rx_qenable( __in efx_rxq_t *erp); extern void efx_rx_qdestroy( __in efx_rxq_t *erp); /* TX */ typedef struct efx_txq_s efx_txq_t; #if EFSYS_OPT_QSTATS /* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */ typedef enum efx_tx_qstat_e { TX_POST, TX_POST_PIO, TX_NQSTATS } efx_tx_qstat_t; /* END MKCONFIG GENERATED 
EfxHeaderTransmitQueueBlock */ #endif /* EFSYS_OPT_QSTATS */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_init( __in efx_nic_t *enp); extern void efx_tx_fini( __in efx_nic_t *enp); #define EFX_BUG35388_WORKAROUND(_encp) \ (((_encp) == NULL) ? 1 : ((_encp)->enc_bug35388_workaround != 0)) #define EFX_TXQ_MAXNDESCS(_encp) \ ((EFX_BUG35388_WORKAROUND(_encp)) ? 2048 : 4096) #define EFX_TXQ_MINNDESCS 512 #define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t)) #define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE) #define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16) #define EFX_TXQ_DC_NDESCS(_dcsize) (8 << _dcsize) #define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND. */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __deref_out efx_txq_t **etpp, __out unsigned int *addedp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); extern void efx_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_qflush( __in efx_txq_t *etp); extern void efx_tx_qenable( __in efx_txq_t *etp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_qpio_enable( __in efx_txq_t *etp); extern void efx_tx_qpio_disable( __in efx_txq_t *etp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void efx_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); extern void efx_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp); extern void efx_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t tci, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES extern const char * efx_tx_qstat_name( __in efx_nic_t *etp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ extern void efx_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ extern void efx_tx_qdestroy( __in efx_txq_t *etp); /* FILTER */ #if EFSYS_OPT_FILTER #define EFX_ETHER_TYPE_IPV4 0x0800 #define EFX_ETHER_TYPE_IPV6 0x86DD #define EFX_IPPROTO_TCP 6 #define EFX_IPPROTO_UDP 17 typedef enum efx_filter_flag_e { EFX_FILTER_FLAG_RX_RSS = 0x01, /* use RSS to spread across * multiple queues */ EFX_FILTER_FLAG_RX_SCATTER = 0x02, /* enable RX scatter */ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04, /* Override an automatic filter * (priority EFX_FILTER_PRI_AUTO). 
* May only be set by the filter * implementation for each type. * A removal request will * restore the automatic filter * in its place. */ EFX_FILTER_FLAG_RX = 0x08, /* Filter is for RX */ EFX_FILTER_FLAG_TX = 0x10, /* Filter is for TX */ } efx_filter_flag_t; typedef enum efx_filter_match_flags_e { EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host * address */ EFX_FILTER_MATCH_LOC_HOST = 0x0002, /* Match by local IP host * address */ EFX_FILTER_MATCH_REM_MAC = 0x0004, /* Match by remote MAC address */ EFX_FILTER_MATCH_REM_PORT = 0x0008, /* Match by remote TCP/UDP port */ EFX_FILTER_MATCH_LOC_MAC = 0x0010, /* Match by local MAC address */ EFX_FILTER_MATCH_LOC_PORT = 0x0020, /* Match by local TCP/UDP port */ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, /* Match by Ether-type */ EFX_FILTER_MATCH_INNER_VID = 0x0080, /* Match by inner VLAN ID */ EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */ EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport * protocol */ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, /* Match by local MAC address * I/G bit. Used for RX default * unicast and multicast/ * broadcast filters. */ } efx_filter_match_flags_t; typedef enum efx_filter_priority_s { EFX_FILTER_PRI_HINT = 0, /* Performance hint */ EFX_FILTER_PRI_AUTO, /* Automatic filter based on device * address list or hardware * requirements. This may only be used * by the filter implementation for * each NIC type. */ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */ EFX_FILTER_PRI_REQUIRED, /* Required for correct behaviour of the * client (e.g. SR-IOV, HyperV VMQ etc.) */ } efx_filter_priority_t; /* * FIXME: All these fields are assumed to be in little-endian byte order. * It may be better for some to be big-endian. See bug42804. */ typedef struct efx_filter_spec_s { uint32_t efs_match_flags:12; uint32_t efs_priority:2; uint32_t efs_flags:6; uint32_t efs_dmaq_id:12; uint32_t efs_rss_context; uint16_t efs_outer_vid; uint16_t efs_inner_vid; uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN]; uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN]; uint16_t efs_ether_type; uint8_t efs_ip_proto; uint16_t efs_loc_port; uint16_t efs_rem_port; efx_oword_t efs_rem_host; efx_oword_t efs_loc_host; } efx_filter_spec_t; /* Default values for use in filter specifications */ #define EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT 0xffffffff #define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff #define EFX_FILTER_SPEC_VID_UNSPEC 0xffff -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_init( __in efx_nic_t *enp); extern void efx_filter_fini( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_insert( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_remove( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_restore( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length); extern void efx_filter_spec_init_rx( __inout efx_filter_spec_t *spec, __in efx_filter_priority_t priority, __in efx_filter_flag_t flags, __in efx_rxq_t *erp); extern void efx_filter_spec_init_tx( __inout efx_filter_spec_t *spec, __in efx_txq_t *etp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_spec_set_ipv4_local( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t host, __in uint16_t port); -extern __checkReturn int +extern
__checkReturn efx_rc_t efx_filter_spec_set_ipv4_full( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t lhost, __in uint16_t lport, __in uint32_t rhost, __in uint16_t rport); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_spec_set_eth_local( __inout efx_filter_spec_t *spec, __in uint16_t vid, __in const uint8_t *addr); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_spec_set_uc_def( __inout efx_filter_spec_t *spec); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_spec_set_mc_def( __inout efx_filter_spec_t *spec); #endif /* EFSYS_OPT_FILTER */ /* HASH */ extern __checkReturn uint32_t efx_hash_dwords( __in_ecount(count) uint32_t const *input, __in size_t count, __in uint32_t init); extern __checkReturn uint32_t efx_hash_bytes( __in_ecount(length) uint8_t const *input, __in size_t length, __in uint32_t init); #ifdef __cplusplus } #endif #endif /* _SYS_EFX_H */ Index: stable/10/sys/dev/sfxge/common/efx_bootcfg.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_bootcfg.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_bootcfg.c (revision 293927) @@ -1,350 +1,350 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_impl.h" #if EFSYS_OPT_BOOTCFG /* * Maximum size of BOOTCFG block across all nics as understood by SFCgPXE. * A multiple of 0x100 so trailing 0xff characters don't contribute to the * checksum.
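 *
 * (The checksum here is the plain byte sum computed by efx_bootcfg_csum()
 * below; the first byte of the sector is adjusted so that the sum of the
 * whole sector is zero modulo 0x100. A run of 0x100 trailing 0xff bytes
 * adds 0xff * 0x100 == 0xff00, which is 0 modulo 0x100, so such padding
 * leaves the checksum unchanged.)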
*/ #define BOOTCFG_MAX_SIZE 0x1000 #define DHCP_END (uint8_t)0xff #define DHCP_PAD (uint8_t)0 static __checkReturn uint8_t efx_bootcfg_csum( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { _NOTE(ARGUNUSED(enp)) unsigned int pos; uint8_t checksum = 0; for (pos = 0; pos < size; pos++) checksum += data[pos]; return (checksum); } -static __checkReturn int +static __checkReturn efx_rc_t efx_bootcfg_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out_opt size_t *usedp) { size_t offset = 0; size_t used = 0; - int rc; + efx_rc_t rc; /* Start parsing tags immediately after the checksum */ for (offset = 1; offset < size; ) { uint8_t tag; uint8_t length; /* Consume tag */ tag = data[offset]; if (tag == DHCP_END) { offset++; used = offset; break; } if (tag == DHCP_PAD) { offset++; continue; } /* Consume length */ if (offset + 1 >= size) { rc = ENOSPC; goto fail1; } length = data[offset + 1]; /* Consume *length */ if (offset + 1 + length >= size) { rc = ENOSPC; goto fail2; } offset += 2 + length; used = offset; } /* Checksum the entire sector, including bytes after any DHCP_END */ if (efx_bootcfg_csum(enp, data, size) != 0) { rc = EINVAL; goto fail3; } if (usedp != NULL) *usedp = used; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - int + efx_rc_t efx_bootcfg_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size) { uint8_t *payload = NULL; size_t used_bytes; size_t sector_length; - int rc; + efx_rc_t rc; rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &sector_length); if (rc != 0) goto fail1; /* * We need to read the entire BOOTCFG area to ensure we read all the * tags, because legacy bootcfg sectors are not guaranteed to end with * a DHCP_END character. If the user hasn't supplied a sufficiently * large buffer then use our own buffer. */ if (sector_length > BOOTCFG_MAX_SIZE) sector_length = BOOTCFG_MAX_SIZE; if (sector_length > size) { EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload); if (payload == NULL) { rc = ENOMEM; goto fail2; } } else payload = (uint8_t *)data; if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) goto fail3; rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0, (caddr_t)payload, sector_length); efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG); if (rc != 0) goto fail4; /* Verify that the area is correctly formatted and checksummed */ rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length, &used_bytes); if (rc != 0 || used_bytes == 0) { payload[0] = (uint8_t)~DHCP_END; payload[1] = DHCP_END; used_bytes = 2; } EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */ EFSYS_ASSERT(used_bytes <= sector_length); /* * Legacy bootcfg sectors don't terminate with a DHCP_END character. * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by * definition large enough for any valid (per-port) bootcfg sector, * so reinitialise the sector if there isn't room for the character. */ if (payload[used_bytes - 1] != DHCP_END) { if (used_bytes + 1 > sector_length) { payload[0] = 0; used_bytes = 1; } payload[used_bytes] = DHCP_END; ++used_bytes; } /* * Verify that the user supplied buffer is large enough for the * entire used bootcfg area, then copy into the user supplied buffer.
*/ if (used_bytes > size) { rc = ENOSPC; goto fail5; } if (sector_length > size) { memcpy(data, payload, used_bytes); EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); } /* Zero out the unused portion of the user buffer */ if (used_bytes < size) (void) memset(data + used_bytes, 0, size - used_bytes); /* * The checksum includes trailing data after any DHCP_END character, * which we've just modified (by truncation or appending DHCP_END). */ data[0] -= efx_bootcfg_csum(enp, data, size); return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); if (sector_length > size) EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - int + efx_rc_t efx_bootcfg_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { uint8_t *chunk; uint8_t checksum; size_t sector_length; size_t chunk_length; size_t used_bytes; size_t offset; size_t remaining; - int rc; + efx_rc_t rc; rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &sector_length); if (rc != 0) goto fail1; if (sector_length > BOOTCFG_MAX_SIZE) sector_length = BOOTCFG_MAX_SIZE; if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0) goto fail2; /* The caller *must* terminate their block with a DHCP_END character */ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */ if ((uint8_t)data[used_bytes - 1] != DHCP_END) { rc = ENOENT; goto fail3; } /* Check that the hardware has support for this much data */ if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) { rc = ENOSPC; goto fail4; } rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, &chunk_length); if (rc != 0) goto fail5; EFSYS_KMEM_ALLOC(enp->en_esip, chunk_length, chunk); if (chunk == NULL) { rc = ENOMEM; goto fail6; } if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0) goto fail7; /* * Write the entire sector_length bytes of data in chunks. Zero out * all data following the DHCP_END, and adjust the checksum */ checksum = efx_bootcfg_csum(enp, data, used_bytes); for (offset = 0; offset < sector_length; offset += remaining) { remaining = MIN(chunk_length, sector_length - offset); /* Fill chunk */ (void) memset(chunk, 0x0, chunk_length); if (offset < used_bytes) memcpy(chunk, data + offset, MIN(remaining, used_bytes - offset)); /* Adjust checksum */ if (offset == 0) chunk[0] -= checksum; if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG, offset, (caddr_t)chunk, remaining)) != 0) goto fail8; } efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG); EFSYS_KMEM_FREE(enp->en_esip, chunk_length, chunk); return (0); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); EFSYS_KMEM_FREE(enp->en_esip, chunk_length, chunk); fail6: EFSYS_PROBE(fail6); efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_BOOTCFG */ Index: stable/10/sys/dev/sfxge/common/efx_ev.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_ev.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_ev.c (revision 293927) @@ -1,1465 +1,1465 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #include "mcdi_mon.h" #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ do { \ (_eep)->ee_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_EV_QSTAT_INCR(_eep, _stat) #endif #define EFX_EV_PRESENT(_qword) \ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff) #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_ev_init( __in efx_nic_t *enp); static void falconsiena_ev_fini( __in efx_nic_t *enp); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep); static void falconsiena_ev_qdestroy( __in efx_evq_t *eep); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); static void falconsiena_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static void falconsiena_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); #if EFSYS_OPT_QSTATS static void falconsiena_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_FALCON static efx_ev_ops_t __efx_ev_falcon_ops = { falconsiena_ev_init, /* eevo_init */ falconsiena_ev_fini, /* eevo_fini */ falconsiena_ev_qcreate, /* eevo_qcreate */ falconsiena_ev_qdestroy, /* eevo_qdestroy */ falconsiena_ev_qprime, /* eevo_qprime */ falconsiena_ev_qpost, /* eevo_qpost */ falconsiena_ev_qmoderate, /* eevo_qmoderate */ #if EFSYS_OPT_QSTATS falconsiena_ev_qstats_update, /* eevo_qstats_update */ #endif 
}; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA static efx_ev_ops_t __efx_ev_siena_ops = { falconsiena_ev_init, /* eevo_init */ falconsiena_ev_fini, /* eevo_fini */ falconsiena_ev_qcreate, /* eevo_qcreate */ falconsiena_ev_qdestroy, /* eevo_qdestroy */ falconsiena_ev_qprime, /* eevo_qprime */ falconsiena_ev_qpost, /* eevo_qpost */ falconsiena_ev_qmoderate, /* eevo_qmoderate */ #if EFSYS_OPT_QSTATS falconsiena_ev_qstats_update, /* eevo_qstats_update */ #endif }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_ev_ops_t __efx_ev_hunt_ops = { hunt_ev_init, /* eevo_init */ hunt_ev_fini, /* eevo_fini */ hunt_ev_qcreate, /* eevo_qcreate */ hunt_ev_qdestroy, /* eevo_qdestroy */ hunt_ev_qprime, /* eevo_qprime */ hunt_ev_qpost, /* eevo_qpost */ hunt_ev_qmoderate, /* eevo_qmoderate */ #if EFSYS_OPT_QSTATS hunt_ev_qstats_update, /* eevo_qstats_update */ #endif }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_ev_init( __in efx_nic_t *enp) { efx_ev_ops_t *eevop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); if (enp->en_mod_flags & EFX_MOD_EV) { rc = EINVAL; goto fail1; } switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: eevop = (efx_ev_ops_t *)&__efx_ev_falcon_ops; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: eevop = (efx_ev_ops_t *)&__efx_ev_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: eevop = (efx_ev_ops_t *)&__efx_ev_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); if ((rc = eevop->eevo_init(enp)) != 0) goto fail2; enp->en_eevop = eevop; enp->en_mod_flags |= EFX_MOD_EV; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_eevop = NULL; enp->en_mod_flags &= ~EFX_MOD_EV; return (rc); } void efx_ev_fini( __in efx_nic_t *enp) { efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); eevop->eevo_fini(enp); enp->en_eevop = NULL; enp->en_mod_flags &= ~EFX_MOD_EV; } - __checkReturn int + __checkReturn efx_rc_t efx_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __deref_out efx_evq_t **eepp) { efx_ev_ops_t *eevop = enp->en_eevop; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_evq_t *eep; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit); /* Allocate an EVQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep); if (eep == NULL) { rc = ENOMEM; goto fail1; } eep->ee_magic = EFX_EVQ_MAGIC; eep->ee_enp = enp; eep->ee_index = index; eep->ee_mask = n - 1; eep->ee_esmp = esmp; if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, eep)) != 0) goto fail2; enp->en_ev_qcount++; *eepp = eep; return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t 
*enp = eep->ee_enp; efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(enp->en_ev_qcount != 0); --enp->en_ev_qcount; eevop->eevo_qdestroy(eep); /* Free the EVQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); } - __checkReturn int + __checkReturn efx_rc_t efx_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; efx_ev_ops_t *eevop = enp->en_eevop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); if (!(enp->en_mod_flags & EFX_MOD_INTR)) { rc = EINVAL; goto fail1; } if ((rc = eevop->eevo_qprime(eep, count)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn boolean_t efx_ev_qpending( __in efx_evq_t *eep, __in unsigned int count) { size_t offset; efx_qword_t qword; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword); return (EFX_EV_PRESENT(qword)); } #if EFSYS_OPT_EV_PREFETCH void efx_ev_qprefetch( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; unsigned int offset; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); } #endif /* EFSYS_OPT_EV_PREFETCH */ void efx_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); /* * FIXME: Huntington will require support for hardware event batching * and merging, which will need a different ev_qpoll implementation. * * Without those features the Falcon/Siena code can be used unchanged. 
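 *
 * For reference, a caller typically drives this entry point from its
 * interrupt or polling context roughly as follows (an illustrative sketch
 * only; "count" is the caller's event queue read pointer and
 * "callbacks"/"arg" are whatever the caller registered):
 *
 *	efx_ev_qpoll(eep, &count, &callbacks, arg);
 *	...				(refill RX/TX rings, etc.)
 *	rc = efx_ev_qprime(eep, count);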
*/ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN); EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV == FSE_AZ_EV_CODE_DRV_GEN_EV); #if EFSYS_OPT_MCDI EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV == FSE_AZ_EV_CODE_MCDI_EVRESPONSE); #endif falconsiena_ev_qpoll(eep, countp, eecp, arg); } void efx_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(eevop != NULL && eevop->eevo_qpost != NULL); eevop->eevo_qpost(eep, data); } - __checkReturn int + __checkReturn efx_rc_t efx_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_ev_ops_t *eevop = enp->en_eevop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); if ((rc = eevop->eevo_qmoderate(eep, us)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_QSTATS void efx_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { efx_nic_t *enp = eep->ee_enp; efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); eevop->eevo_qstats_update(eep, stat); } #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_ev_init( __in efx_nic_t *enp) { efx_oword_t oword; /* * Program the event queue for receive and transmit queue * flush events. */ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0); EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword); return (0); } static __checkReturn boolean_t falconsiena_ev_rx_not_ok( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in uint32_t label, __in uint32_t id, __inout uint16_t *flagsp) { boolean_t ignore = B_FALSE; if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC); EFSYS_PROBE(tobe_disc); /* * Assume this is a unicast address mismatch, unless below * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or * EV_RX_PAUSE_FRM_ERR is set. */ (*flagsp) |= EFX_ADDR_MISMATCH; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) { EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id); EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); (*flagsp) |= EFX_DISCARD; #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER) /* * Lookout for payload queue ran dry errors and ignore them. * * Sadly for the header/data split cases, the descriptor * pointer in this event refers to the header queue and * therefore cannot be easily detected as duplicate. * So we drop these and rely on the receive processing seeing * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard * the partially received packet. 
*/ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) && (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) && (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0)) ignore = B_TRUE; #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */ } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); EFSYS_PROBE(crc_err); (*flagsp) &= ~EFX_ADDR_MISMATCH; (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR); EFSYS_PROBE(pause_frm_err); (*flagsp) &= ~EFX_ADDR_MISMATCH; (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR); EFSYS_PROBE(owner_id_err); (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); EFSYS_PROBE(ipv4_err); (*flagsp) &= ~EFX_CKSUM_IPV4; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); EFSYS_PROBE(udp_chk_err); (*flagsp) &= ~EFX_CKSUM_TCPUDP; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR); /* * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error * condition. */ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP); } return (ignore); } static __checkReturn boolean_t falconsiena_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t id; uint32_t size; uint32_t label; boolean_t ok; #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER) boolean_t sop; boolean_t jumbo_cont; #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */ uint32_t hdr_type; boolean_t is_v6; uint16_t flags; boolean_t ignore; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_RX); /* Basic packet information */ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR); size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT); label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL); ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0); #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER) sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0); jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0); #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE); is_v6 = (enp->en_family != EFX_FAMILY_FALCON && EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0); /* * If packet is marked as OK and packet type is TCP/IP or * UDP/IP or other IP, then we can rely on the hardware checksums. 
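 *
 * (Illustrative only: a driver's eec_rx callback might map these flags
 * onto its stack's offload fields roughly as below. The mbuf fields are
 * an assumption about the consumer, not something this file defines.)
 *
 *	if ((flags & EFX_CKSUM_IPV4) != 0)
 *		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
 *	if ((flags & EFX_CKSUM_TCPUDP) != 0) {
 *		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 *		m->m_pkthdr.csum_data = 0xffff;
 *	}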
*/ switch (hdr_type) { case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP; if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP; if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); flags = EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_OTHER: EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); flags = 0; break; default: EFSYS_ASSERT(B_FALSE); flags = 0; break; } #if EFSYS_OPT_RX_SCATTER || EFSYS_OPT_RX_HDR_SPLIT /* Report scatter and header/lookahead split buffer flags */ if (sop) flags |= EFX_PKT_START; if (jumbo_cont) flags |= EFX_PKT_CONT; #endif /* EFSYS_OPT_RX_SCATTER || EFSYS_OPT_RX_HDR_SPLIT */ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */ if (!ok) { ignore = falconsiena_ev_rx_not_ok(eep, eqp, label, id, &flags); if (ignore) { EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, uint32_t, size, uint16_t, flags); return (B_FALSE); } } /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); /* Detect multicast packets that didn't match the filter */ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH); } else { EFSYS_PROBE(mcast_mismatch); flags |= EFX_ADDR_MISMATCH; } } else { flags |= EFX_PKT_UNICAST; } /* * The packet parser in Siena can abort parsing packets under * certain error conditions, setting the PKT_NOT_PARSED bit * (which clears PKT_OK). If this is set, then don't trust * the PKT_TYPE field. 
*/ if (enp->en_family != EFX_FAMILY_FALCON && !ok) { uint32_t parse_err; parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED); if (parse_err != 0) flags |= EFX_CHECK_VLAN; } if (~flags & EFX_CHECK_VLAN) { uint32_t pkt_type; pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE); if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN) flags |= EFX_PKT_VLAN_TAGGED; } EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, uint32_t, size, uint16_t, flags); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, id, size, flags); return (should_abort); } static __checkReturn boolean_t falconsiena_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) { id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR); label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0) EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL); EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED); return (B_FALSE); } static __checkReturn boolean_t falconsiena_ev_global( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; efx_port_t *epp = &(enp->en_port); boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_GLOBAL); should_abort = B_FALSE; /* Check for a link management event */ if (EFX_QWORD_FIELD(*eqp, FSF_BZ_GLB_EV_XG_MNT_INTR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_GLOBAL_MNT); EFSYS_PROBE(xg_mgt); epp->ep_mac_poll_needed = B_TRUE; } return (should_abort); } static __checkReturn boolean_t falconsiena_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) { case FSE_AZ_TX_DESCQ_FLS_DONE_EV: { uint32_t txq_index; EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case FSE_AZ_RX_DESCQ_FLS_DONE_EV: { uint32_t rxq_index; uint32_t failed; rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL); if (failed) { EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED); EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index); should_abort = eecp->eec_rxq_flush_failed(arg, rxq_index); } else { EFX_EV_QSTAT_INCR(eep, 
EV_DRIVER_RX_DESCQ_FLS_DONE); EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); } break; } case FSE_AZ_EVQ_INIT_DONE_EV: EFSYS_ASSERT(eecp->eec_initialized != NULL); should_abort = eecp->eec_initialized(arg); break; case FSE_AZ_EVQ_NOT_EN_EV: EFSYS_PROBE(evq_not_en); break; case FSE_AZ_SRM_UPD_DONE_EV: { uint32_t code; EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE); code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_sram != NULL); should_abort = eecp->eec_sram(arg, code); break; } case FSE_AZ_WAKE_UP_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_wake_up != NULL); should_abort = eecp->eec_wake_up(arg, id); break; } case FSE_AZ_TX_PKT_NON_TCP_UDP: EFSYS_PROBE(tx_pkt_non_tcp_udp); break; case FSE_AZ_TIMER_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_timer != NULL); should_abort = eecp->eec_timer(arg, id); break; } case FSE_AZ_RX_DSC_ERROR_EV: EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR); EFSYS_PROBE(rx_dsc_error); EFSYS_ASSERT(eecp->eec_exception != NULL); should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_RX_DSC_ERROR, 0); break; case FSE_AZ_TX_DSC_ERROR_EV: EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR); EFSYS_PROBE(tx_dsc_error); EFSYS_ASSERT(eecp->eec_exception != NULL); should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_TX_DSC_ERROR, 0); break; default: break; } return (should_abort); } static __checkReturn boolean_t falconsiena_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t data; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0); if (data >= ((uint32_t)1 << 16)) { EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); return (B_TRUE); } EFSYS_ASSERT(eecp->eec_software != NULL); should_abort = eecp->eec_software(arg, (uint16_t)data); return (should_abort); } #if EFSYS_OPT_MCDI static __checkReturn boolean_t falconsiena_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; unsigned code; boolean_t should_abort = B_FALSE; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); if (enp->en_family != EFX_FAMILY_SIENA) goto out; EFSYS_ASSERT(eecp->eec_link_change != NULL); EFSYS_ASSERT(eecp->eec_exception != NULL); #if EFSYS_OPT_MON_STATS EFSYS_ASSERT(eecp->eec_monitor != NULL); #endif EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); switch (code) { case MCDI_EVENT_CODE_BADSSERT: efx_mcdi_ev_death(enp, EINTR); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(enp, MCDI_EV_FIELD(eqp, CMDDONE_SEQ), MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); break; case MCDI_EVENT_CODE_LINKCHANGE: { efx_link_mode_t link_mode; siena_phy_link_ev(enp, eqp, &link_mode); should_abort = eecp->eec_link_change(arg, link_mode); break; } case MCDI_EVENT_CODE_SENSOREVT: { #if EFSYS_OPT_MON_STATS efx_mon_stat_t id; efx_mon_stat_value_t value; - int rc; + efx_rc_t rc; if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) should_abort = eecp->eec_monitor(arg, id, value); else if (rc == ENOTSUP) { should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_SENSOREVT, MCDI_EV_FIELD(eqp, DATA)); } else EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ #else 
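/* Monitor statistics support is compiled out; the sensor event is ignored. */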
should_abort = B_FALSE; #endif break; } case MCDI_EVENT_CODE_SCHEDERR: /* Informational only */ break; case MCDI_EVENT_CODE_REBOOT: efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif break; case MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } default: EFSYS_PROBE1(mc_pcol_error, int, code); break; } out: return (should_abort); } #endif /* EFSYS_OPT_MCDI */ -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; rptr = count & eep->ee_mask; EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); return (0); } #define EFX_EV_BATCH 8 static void falconsiena_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_qword_t ev[EFX_EV_BATCH]; unsigned int batch; unsigned int total; unsigned int count; unsigned int index; size_t offset; EFSYS_ASSERT(countp != NULL); EFSYS_ASSERT(eecp != NULL); count = *countp; do { /* Read up until the end of the batch period */ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1)); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); for (total = 0; total < batch; ++total) { EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total])); if (!EFX_EV_PRESENT(ev[total])) break; EFSYS_PROBE3(event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0)); offset += sizeof (efx_qword_t); } #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1) /* * Prefetch the next batch when we get within PREFETCH_PERIOD * of a completed batch. If the batch is smaller, then prefetch * immediately. 
*/ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD) EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); #endif /* EFSYS_OPT_EV_PREFETCH */ /* Process the batch of events */ for (index = 0; index < total; ++index) { boolean_t should_abort; uint32_t code; #if EFSYS_OPT_EV_PREFETCH /* Prefetch if we've now reached the batch period */ if (total == batch && index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) { offset = (count + batch) & eep->ee_mask; offset *= sizeof (efx_qword_t); EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); } #endif /* EFSYS_OPT_EV_PREFETCH */ EFX_EV_QSTAT_INCR(eep, EV_ALL); code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE); switch (code) { case FSE_AZ_EV_CODE_RX_EV: should_abort = eep->ee_rx(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_TX_EV: should_abort = eep->ee_tx(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_DRIVER_EV: should_abort = eep->ee_driver(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_DRV_GEN_EV: should_abort = eep->ee_drv_gen(eep, &(ev[index]), eecp, arg); break; #if EFSYS_OPT_MCDI case FSE_AZ_EV_CODE_MCDI_EVRESPONSE: should_abort = eep->ee_mcdi(eep, &(ev[index]), eecp, arg); break; #endif case FSE_AZ_EV_CODE_GLOBAL_EV: if (eep->ee_global) { should_abort = eep->ee_global(eep, &(ev[index]), eecp, arg); break; } /* else fallthrough */ default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(ev[index], EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(ev[index], EFX_DWORD_0)); EFSYS_ASSERT(eecp->eec_exception != NULL); (void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code); should_abort = B_TRUE; } if (should_abort) { /* Ignore subsequent events */ total = index + 1; break; } } /* * Now that the hardware has most likely moved onto dma'ing * into the next cache line, clear the processed events. 
Take * care to only clear out events that we've processed */ EFX_SET_QWORD(ev[0]); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); for (index = 0; index < total; ++index) { EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0])); offset += sizeof (efx_qword_t); } count += total; } while (total == batch); *countp = count; } static void falconsiena_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t ev; efx_oword_t oword; EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV, FSF_AZ_EV_DATA_DW0, (uint32_t)data); EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index, EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0), EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1)); EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); unsigned int locked; efx_dword_t dword; - int rc; + efx_rc_t rc; if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { if (enp->en_family == EFX_FAMILY_FALCON) EFX_POPULATE_DWORD_2(dword, FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_DIS, FRF_AB_TC_TIMER_VAL, 0); else EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, FRF_CZ_TC_TIMER_VAL, 0); } else { uint32_t timer_val; /* Calculate the timer value in quanta */ timer_val = us * 1000 / encp->enc_evq_timer_quantum_ns; /* Moderation value is base 0 so we need to deduct 1 */ if (timer_val > 0) timer_val--; if (enp->en_family == EFX_FAMILY_FALCON) EFX_POPULATE_DWORD_2(dword, FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_INT_HLDOFF, FRF_AB_TIMER_VAL, timer_val); else EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, FRF_CZ_TC_TIMER_VAL, timer_val); } locked = (eep->ee_index == 0) ? 
1 : 0; EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0, eep->ee_index, &dword, locked); return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t size; efx_oword_t oword; - int rc; + efx_rc_t rc; EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail2; } #if EFSYS_OPT_RX_SCALE if (enp->en_intr.ei_type == EFX_INTR_LINE && index >= EFX_MAXRSS_LEGACY) { rc = EINVAL; goto fail3; } #endif for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS); size++) if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail4; } /* Set up the handler table */ eep->ee_rx = falconsiena_ev_rx; eep->ee_tx = falconsiena_ev_tx; eep->ee_driver = falconsiena_ev_driver; eep->ee_global = falconsiena_ev_global; eep->ee_drv_gen = falconsiena_ev_drv_gen; #if EFSYS_OPT_MCDI eep->ee_mcdi = falconsiena_ev_mcdi; #endif /* EFSYS_OPT_MCDI */ /* Set up the new event queue */ if (enp->en_family != EFX_FAMILY_FALCON) { EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE); } EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size, FRF_AZ_EVQ_BUF_BASE_ID, id); EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE); return (0); fail4: EFSYS_PROBE(fail4); #if EFSYS_OPT_RX_SCALE fail3: EFSYS_PROBE(fail3); #endif fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock b693ddf85aee1bfd */ static const char *__efx_ev_qstat_name[] = { "all", "rx", "rx_ok", "rx_frm_trunc", "rx_tobe_disc", "rx_pause_frm_err", "rx_buf_owner_id_err", "rx_ipv4_hdr_chksum_err", "rx_tcp_udp_chksum_err", "rx_eth_crc_err", "rx_ip_frag_err", "rx_mcast_pkt", "rx_mcast_hash_match", "rx_tcp_ipv4", "rx_tcp_ipv6", "rx_udp_ipv4", "rx_udp_ipv6", "rx_other_ipv4", "rx_other_ipv6", "rx_non_ip", "rx_batch", "tx", "tx_wq_ff_full", "tx_pkt_err", "tx_pkt_too_big", "tx_unexpected", "global", "global_mnt", "driver", "driver_srm_upd_done", "driver_tx_descq_fls_done", "driver_rx_descq_fls_done", "driver_rx_descq_fls_failed", "driver_rx_dsc_error", "driver_tx_dsc_error", "drv_gen", "mcdi_response", }; /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */ const char * efx_ev_qstat_name( __in efx_nic_t *enp, __in unsigned int id) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, EV_NQSTATS); return (__efx_ev_qstat_name[id]); } #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA #if EFSYS_OPT_QSTATS static void falconsiena_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < EV_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, eep->ee_stat[id]); eep->ee_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ static void falconsiena_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = 
eep->ee_enp; efx_oword_t oword; /* Purge event queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, eep->ee_index, &oword, B_TRUE); if (enp->en_family != EFX_FAMILY_FALCON) { EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE); } } static void falconsiena_ev_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/efx_filter.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_filter.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_filter.c (revision 293927) @@ -1,1439 +1,1439 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_FILTER #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_init( __in efx_nic_t *enp); static void falconsiena_filter_fini( __in efx_nic_t *enp); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_restore( __in efx_nic_t *enp); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length); #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_FALCON static efx_filter_ops_t __efx_filter_falcon_ops = { falconsiena_filter_init, /* efo_init */ falconsiena_filter_fini, /* efo_fini */ falconsiena_filter_restore, /* efo_restore */ falconsiena_filter_add, /* efo_add */ falconsiena_filter_delete, /* efo_delete */ falconsiena_filter_supported_filters, /* efo_supported_filters */ NULL, /* efo_reconfigure */ }; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA static efx_filter_ops_t __efx_filter_siena_ops = { falconsiena_filter_init, /* efo_init */ falconsiena_filter_fini, /* efo_fini */ falconsiena_filter_restore, /* efo_restore */ falconsiena_filter_add, /* efo_add */ falconsiena_filter_delete, /* efo_delete */ falconsiena_filter_supported_filters, /* efo_supported_filters */ NULL, /* efo_reconfigure */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_filter_ops_t __efx_filter_hunt_ops = { hunt_filter_init, /* efo_init */ hunt_filter_fini, /* efo_fini */ hunt_filter_restore, /* efo_restore */ hunt_filter_add, /* efo_add */ hunt_filter_delete, /* efo_delete */ hunt_filter_supported_filters, /* efo_supported_filters */ hunt_filter_reconfigure, /* efo_reconfigure */ }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_filter_insert( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { efx_filter_ops_t *efop = enp->en_efop; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX); return (efop->efo_add(enp, spec, B_FALSE)); } - __checkReturn int + __checkReturn efx_rc_t efx_filter_remove( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { efx_filter_ops_t *efop = enp->en_efop; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX); #if EFSYS_OPT_RX_SCALE spec->efs_rss_context = enp->en_rss_context; #endif return (efop->efo_delete(enp, spec)); } - __checkReturn int + __checkReturn efx_rc_t efx_filter_restore( __in efx_nic_t *enp) { - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); if ((rc = enp->en_efop->efo_restore(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_filter_init( __in efx_nic_t *enp) { efx_filter_ops_t *efop; - int rc; + efx_rc_t rc; /* Check that efx_filter_spec_t is 64 bytes. 
*/ EFX_STATIC_ASSERT(sizeof (efx_filter_spec_t) == 64); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER)); switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: efop = (efx_filter_ops_t *)&__efx_filter_falcon_ops; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: efop = (efx_filter_ops_t *)&__efx_filter_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: efop = (efx_filter_ops_t *)&__efx_filter_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } if ((rc = efop->efo_init(enp)) != 0) goto fail2; enp->en_efop = efop; enp->en_mod_flags |= EFX_MOD_FILTER; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_efop = NULL; enp->en_mod_flags &= ~EFX_MOD_FILTER; return (rc); } void efx_filter_fini( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); enp->en_efop->efo_fini(enp); enp->en_efop = NULL; enp->en_mod_flags &= ~EFX_MOD_FILTER; } - __checkReturn int + __checkReturn efx_rc_t efx_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length) { - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); EFSYS_ASSERT(enp->en_efop->efo_supported_filters != NULL); if ((rc = enp->en_efop->efo_supported_filters(enp, list, length)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in int count) { - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); if (enp->en_efop->efo_reconfigure != NULL) { if ((rc = enp->en_efop->efo_reconfigure(enp, mac_addr, all_unicst, mulcst, all_mulcst, brdcst, addrs, count)) != 0) goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_filter_spec_init_rx( __inout efx_filter_spec_t *spec, __in efx_filter_priority_t priority, __in efx_filter_flag_t flags, __in efx_rxq_t *erp) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(erp, !=, NULL); EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER)) == 0); memset(spec, 0, sizeof (*spec)); spec->efs_priority = priority; spec->efs_flags = EFX_FILTER_FLAG_RX | flags; spec->efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT; spec->efs_dmaq_id = (uint16_t)erp->er_index; } void efx_filter_spec_init_tx( __inout efx_filter_spec_t *spec, __in efx_txq_t *etp) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(etp, !=, NULL); memset(spec, 0, sizeof (*spec)); spec->efs_priority = EFX_FILTER_PRI_REQUIRED; spec->efs_flags = EFX_FILTER_FLAG_TX; spec->efs_dmaq_id = (uint16_t)etp->et_index; } /* * Specify IPv4 host, transport protocol and port in a filter specification */ -__checkReturn int 
+__checkReturn efx_rc_t efx_filter_spec_set_ipv4_local( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t host, __in uint16_t port) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; spec->efs_ether_type = EFX_ETHER_TYPE_IPV4; spec->efs_ip_proto = proto; spec->efs_loc_host.eo_u32[0] = host; spec->efs_loc_port = port; return (0); } /* * Specify IPv4 hosts, transport protocol and ports in a filter specification */ -__checkReturn int +__checkReturn efx_rc_t efx_filter_spec_set_ipv4_full( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t lhost, __in uint16_t lport, __in uint32_t rhost, __in uint16_t rport) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; spec->efs_ether_type = EFX_ETHER_TYPE_IPV4; spec->efs_ip_proto = proto; spec->efs_loc_host.eo_u32[0] = lhost; spec->efs_loc_port = lport; spec->efs_rem_host.eo_u32[0] = rhost; spec->efs_rem_port = rport; return (0); } /* * Specify local Ethernet address and/or VID in filter specification */ -__checkReturn int +__checkReturn efx_rc_t efx_filter_spec_set_eth_local( __inout efx_filter_spec_t *spec, __in uint16_t vid, __in const uint8_t *addr) { EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3P(addr, !=, NULL); if (vid == EFX_FILTER_SPEC_VID_UNSPEC && addr == NULL) return (EINVAL); if (vid != EFX_FILTER_SPEC_VID_UNSPEC) { spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; spec->efs_outer_vid = vid; } if (addr != NULL) { spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC; memcpy(spec->efs_loc_mac, addr, EFX_MAC_ADDR_LEN); } return (0); } /* * Specify matching otherwise-unmatched unicast in a filter specification */ -__checkReturn int +__checkReturn efx_rc_t efx_filter_spec_set_uc_def( __inout efx_filter_spec_t *spec) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; return (0); } /* * Specify matching otherwise-unmatched multicast in a filter specification */ -__checkReturn int +__checkReturn efx_rc_t efx_filter_spec_set_mc_def( __inout efx_filter_spec_t *spec) { EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; spec->efs_loc_mac[0] = 1; return (0); } #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA /* * "Fudge factors" - difference between programmed value and actual depth. * Due to pipelined implementation we need to program H/W with a value that * is larger than the hop limit we want. */ #define FILTER_CTL_SRCH_FUDGE_WILD 3 #define FILTER_CTL_SRCH_FUDGE_FULL 1 /* * Hard maximum hop limit. Hardware will time-out beyond 200-something. * We also need to avoid infinite loops in efx_filter_search() when the * table is full. 
*/ #define FILTER_CTL_SRCH_MAX 200 -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_spec_from_gen_spec( __out falconsiena_filter_spec_t *fs_spec, __in efx_filter_spec_t *gen_spec) { - int rc; + efx_rc_t rc; boolean_t is_full = B_FALSE; if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) EFSYS_ASSERT3U(gen_spec->efs_flags, ==, EFX_FILTER_FLAG_TX); else EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX); /* Falconsiena only has one RSS context */ if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->efs_rss_context != 0) { rc = EINVAL; goto fail1; } fs_spec->fsfs_flags = gen_spec->efs_flags; fs_spec->fsfs_dmaq_id = gen_spec->efs_dmaq_id; switch (gen_spec->efs_match_flags) { case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT: is_full = B_TRUE; /* Fall through */ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT: { uint32_t rhost, host1, host2; uint16_t rport, port1, port2; if (gen_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4) { rc = ENOTSUP; goto fail2; } if (gen_spec->efs_loc_port == 0 || (is_full && gen_spec->efs_rem_port == 0)) { rc = EINVAL; goto fail3; } switch (gen_spec->efs_ip_proto) { case EFX_IPPROTO_TCP: if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { fs_spec->fsfs_type = (is_full ? EFX_FS_FILTER_TX_TCP_FULL : EFX_FS_FILTER_TX_TCP_WILD); } else { fs_spec->fsfs_type = (is_full ? EFX_FS_FILTER_RX_TCP_FULL : EFX_FS_FILTER_RX_TCP_WILD); } break; case EFX_IPPROTO_UDP: if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { fs_spec->fsfs_type = (is_full ? EFX_FS_FILTER_TX_UDP_FULL : EFX_FS_FILTER_TX_UDP_WILD); } else { fs_spec->fsfs_type = (is_full ? EFX_FS_FILTER_RX_UDP_FULL : EFX_FS_FILTER_RX_UDP_WILD); } break; default: rc = ENOTSUP; goto fail4; } /* * The filter is constructed in terms of source and destination, * with the odd wrinkle that the ports are swapped in a UDP * wildcard filter. We need to convert from local and remote * addresses (zero for a wildcard). */ rhost = is_full ? gen_spec->efs_rem_host.eo_u32[0] : 0; rport = is_full ? gen_spec->efs_rem_port : 0; if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { host1 = gen_spec->efs_loc_host.eo_u32[0]; host2 = rhost; } else { host1 = rhost; host2 = gen_spec->efs_loc_host.eo_u32[0]; } if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { if (fs_spec->fsfs_type == EFX_FS_FILTER_TX_UDP_WILD) { port1 = rport; port2 = gen_spec->efs_loc_port; } else { port1 = gen_spec->efs_loc_port; port2 = rport; } } else { if (fs_spec->fsfs_type == EFX_FS_FILTER_RX_UDP_WILD) { port1 = gen_spec->efs_loc_port; port2 = rport; } else { port1 = rport; port2 = gen_spec->efs_loc_port; } } fs_spec->fsfs_dword[0] = (host1 << 16) | port1; fs_spec->fsfs_dword[1] = (port2 << 16) | (host1 >> 16); fs_spec->fsfs_dword[2] = host2; break; } case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: is_full = B_TRUE; /* Fall through */ case EFX_FILTER_MATCH_LOC_MAC: if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { fs_spec->fsfs_type = (is_full ? EFX_FS_FILTER_TX_MAC_FULL : EFX_FS_FILTER_TX_MAC_WILD); } else { fs_spec->fsfs_type = (is_full ? EFX_FS_FILTER_RX_MAC_FULL : EFX_FS_FILTER_RX_MAC_WILD); } fs_spec->fsfs_dword[0] = is_full ? 
gen_spec->efs_outer_vid : 0; fs_spec->fsfs_dword[1] = gen_spec->efs_loc_mac[2] << 24 | gen_spec->efs_loc_mac[3] << 16 | gen_spec->efs_loc_mac[4] << 8 | gen_spec->efs_loc_mac[5]; fs_spec->fsfs_dword[2] = gen_spec->efs_loc_mac[0] << 8 | gen_spec->efs_loc_mac[1]; break; default: EFSYS_ASSERT(B_FALSE); rc = ENOTSUP; goto fail5; } return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit * key derived from the n-tuple. */ static uint16_t falconsiena_filter_tbl_hash( __in uint32_t key) { uint16_t tmp; /* First 16 rounds */ tmp = 0x1fff ^ (uint16_t)(key >> 16); tmp = tmp ^ tmp >> 3 ^ tmp >> 6; tmp = tmp ^ tmp >> 9; /* Last 16 rounds */ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff); tmp = tmp ^ tmp >> 3 ^ tmp >> 6; tmp = tmp ^ tmp >> 9; return (tmp); } /* * To allow for hash collisions, filter search continues at these * increments from the first possible entry selected by the hash. */ static uint16_t falconsiena_filter_tbl_increment( __in uint32_t key) { return ((uint16_t)(key * 2 - 1)); } static __checkReturn boolean_t falconsiena_filter_test_used( __in falconsiena_filter_tbl_t *fsftp, __in unsigned int index) { EFSYS_ASSERT3P(fsftp->fsft_bitmap, !=, NULL); return ((fsftp->fsft_bitmap[index / 32] & (1 << (index % 32))) != 0); } static void falconsiena_filter_set_used( __in falconsiena_filter_tbl_t *fsftp, __in unsigned int index) { EFSYS_ASSERT3P(fsftp->fsft_bitmap, !=, NULL); fsftp->fsft_bitmap[index / 32] |= (1 << (index % 32)); ++fsftp->fsft_used; } static void falconsiena_filter_clear_used( __in falconsiena_filter_tbl_t *fsftp, __in unsigned int index) { EFSYS_ASSERT3P(fsftp->fsft_bitmap, !=, NULL); fsftp->fsft_bitmap[index / 32] &= ~(1 << (index % 32)); --fsftp->fsft_used; EFSYS_ASSERT3U(fsftp->fsft_used, >=, 0); } static falconsiena_filter_tbl_id_t falconsiena_filter_tbl_id( __in falconsiena_filter_type_t type) { falconsiena_filter_tbl_id_t tbl_id; switch (type) { case EFX_FS_FILTER_RX_TCP_FULL: case EFX_FS_FILTER_RX_TCP_WILD: case EFX_FS_FILTER_RX_UDP_FULL: case EFX_FS_FILTER_RX_UDP_WILD: tbl_id = EFX_FS_FILTER_TBL_RX_IP; break; #if EFSYS_OPT_SIENA case EFX_FS_FILTER_RX_MAC_FULL: case EFX_FS_FILTER_RX_MAC_WILD: tbl_id = EFX_FS_FILTER_TBL_RX_MAC; break; case EFX_FS_FILTER_TX_TCP_FULL: case EFX_FS_FILTER_TX_TCP_WILD: case EFX_FS_FILTER_TX_UDP_FULL: case EFX_FS_FILTER_TX_UDP_WILD: tbl_id = EFX_FS_FILTER_TBL_TX_IP; break; case EFX_FS_FILTER_TX_MAC_FULL: case EFX_FS_FILTER_TX_MAC_WILD: tbl_id = EFX_FS_FILTER_TBL_TX_MAC; break; #endif /* EFSYS_OPT_SIENA */ default: EFSYS_ASSERT(B_FALSE); tbl_id = EFX_FS_FILTER_NTBLS; break; } return (tbl_id); } static void falconsiena_filter_reset_search_depth( __inout falconsiena_filter_t *fsfp, __in falconsiena_filter_tbl_id_t tbl_id) { switch (tbl_id) { case EFX_FS_FILTER_TBL_RX_IP: fsfp->fsf_depth[EFX_FS_FILTER_RX_TCP_FULL] = 0; fsfp->fsf_depth[EFX_FS_FILTER_RX_TCP_WILD] = 0; fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_FULL] = 0; fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_WILD] = 0; break; #if EFSYS_OPT_SIENA case EFX_FS_FILTER_TBL_RX_MAC: fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_FULL] = 0; fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_WILD] = 0; break; case EFX_FS_FILTER_TBL_TX_IP: fsfp->fsf_depth[EFX_FS_FILTER_TX_TCP_FULL] = 0; fsfp->fsf_depth[EFX_FS_FILTER_TX_TCP_WILD] = 0; fsfp->fsf_depth[EFX_FS_FILTER_TX_UDP_FULL] = 0; 
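[Editorial sketch, not part of the patch.] falconsiena_filter_tbl_hash() and falconsiena_filter_tbl_increment() above implement the x^16 + x^3 + 1 LFSR fold of the 32-bit n-tuple key and the collision-probe step. The standalone sketch below restates them and checks the property the probing relies on: 2*key - 1 is always odd, hence coprime with the power-of-two table size, so the probe sequence visits every slot before repeating. TABLE_SIZE is an arbitrary illustrative value, not a hardware constant.

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 8192u	/* any power of two; illustrative only */

/* Same fold as falconsiena_filter_tbl_hash(): LFSR polynomial
 * x^16 + x^3 + 1 applied to the two 16-bit halves of the key. */
static uint16_t
filter_hash(uint32_t key)
{
	uint16_t tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ (uint16_t)(key >> 16);
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff);
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return (tmp ^ tmp >> 9);
}

/* Same step as falconsiena_filter_tbl_increment(): always odd. */
static uint16_t
filter_increment(uint32_t key)
{
	return ((uint16_t)(key * 2 - 1));
}

int
main(void)
{
	uint32_t key = 0xdeadbeef;
	unsigned int start = filter_hash(key) & (TABLE_SIZE - 1);
	unsigned int incr = filter_increment(key);
	unsigned int idx = start, visited = 0;

	/* An odd step over a power-of-two ring is a full cycle: count the
	 * distinct probes made before the sequence returns to its start. */
	do {
		visited++;
		idx = (idx + incr) & (TABLE_SIZE - 1);
	} while (idx != start);

	printf("hash 0x%04x, increment 0x%04x, cycle length %u of %u\n",
	    (unsigned int)filter_hash(key), (unsigned int)filter_increment(key),
	    visited, TABLE_SIZE);
	return (0);
}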
fsfp->fsf_depth[EFX_FS_FILTER_TX_UDP_WILD] = 0; break; case EFX_FS_FILTER_TBL_TX_MAC: fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_FULL] = 0; fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_WILD] = 0; break; #endif /* EFSYS_OPT_SIENA */ default: EFSYS_ASSERT(B_FALSE); break; } } static void falconsiena_filter_push_rx_limits( __in efx_nic_t *enp) { falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter; efx_oword_t oword; EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT, fsfp->fsf_depth[EFX_FS_FILTER_RX_TCP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT, fsfp->fsf_depth[EFX_FS_FILTER_RX_TCP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT, fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT, fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); #if EFSYS_OPT_SIENA if (fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_MAC].fsft_size) { EFX_SET_OWORD_FIELD(oword, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); } #endif /* EFSYS_OPT_SIENA */ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); } static void falconsiena_filter_push_tx_limits( __in efx_nic_t *enp) { falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter; efx_oword_t oword; EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword); if (fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_IP].fsft_size != 0) { EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE, fsfp->fsf_depth[EFX_FS_FILTER_TX_TCP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE, fsfp->fsf_depth[EFX_FS_FILTER_TX_TCP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE, fsfp->fsf_depth[EFX_FS_FILTER_TX_UDP_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE, fsfp->fsf_depth[EFX_FS_FILTER_TX_UDP_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); } if (fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_MAC].fsft_size != 0) { EFX_SET_OWORD_FIELD( oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_FULL] + FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_WILD] + FILTER_CTL_SRCH_FUDGE_WILD); } EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword); } /* Build a filter entry and return its n-tuple key. */ static __checkReturn uint32_t falconsiena_filter_build( __out efx_oword_t *filter, __in falconsiena_filter_spec_t *spec) { uint32_t dword3; uint32_t key; uint8_t type = spec->fsfs_type; uint32_t flags = spec->fsfs_flags; switch (falconsiena_filter_tbl_id(type)) { case EFX_FS_FILTER_TBL_RX_IP: { boolean_t is_udp = (type == EFX_FS_FILTER_RX_UDP_FULL || type == EFX_FS_FILTER_RX_UDP_WILD); EFX_POPULATE_OWORD_7(*filter, FRF_BZ_RSS_EN, (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0, FRF_BZ_SCATTER_EN, (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 
1 : 0, FRF_AZ_TCP_UDP, is_udp, FRF_AZ_RXQ_ID, spec->fsfs_dmaq_id, EFX_DWORD_2, spec->fsfs_dword[2], EFX_DWORD_1, spec->fsfs_dword[1], EFX_DWORD_0, spec->fsfs_dword[0]); dword3 = is_udp; break; } #if EFSYS_OPT_SIENA case EFX_FS_FILTER_TBL_RX_MAC: { boolean_t is_wild = (type == EFX_FS_FILTER_RX_MAC_WILD); EFX_POPULATE_OWORD_7(*filter, FRF_CZ_RMFT_RSS_EN, (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0, FRF_CZ_RMFT_SCATTER_EN, (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0, FRF_CZ_RMFT_RXQ_ID, spec->fsfs_dmaq_id, FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, FRF_CZ_RMFT_DEST_MAC_DW1, spec->fsfs_dword[2], FRF_CZ_RMFT_DEST_MAC_DW0, spec->fsfs_dword[1], FRF_CZ_RMFT_VLAN_ID, spec->fsfs_dword[0]); dword3 = is_wild; break; } #endif /* EFSYS_OPT_SIENA */ case EFX_FS_FILTER_TBL_TX_IP: { boolean_t is_udp = (type == EFX_FS_FILTER_TX_UDP_FULL || type == EFX_FS_FILTER_TX_UDP_WILD); EFX_POPULATE_OWORD_5(*filter, FRF_CZ_TIFT_TCP_UDP, is_udp, FRF_CZ_TIFT_TXQ_ID, spec->fsfs_dmaq_id, EFX_DWORD_2, spec->fsfs_dword[2], EFX_DWORD_1, spec->fsfs_dword[1], EFX_DWORD_0, spec->fsfs_dword[0]); dword3 = is_udp | spec->fsfs_dmaq_id << 1; break; } #if EFSYS_OPT_SIENA case EFX_FS_FILTER_TBL_TX_MAC: { boolean_t is_wild = (type == EFX_FS_FILTER_TX_MAC_WILD); EFX_POPULATE_OWORD_5(*filter, FRF_CZ_TMFT_TXQ_ID, spec->fsfs_dmaq_id, FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, FRF_CZ_TMFT_SRC_MAC_DW1, spec->fsfs_dword[2], FRF_CZ_TMFT_SRC_MAC_DW0, spec->fsfs_dword[1], FRF_CZ_TMFT_VLAN_ID, spec->fsfs_dword[0]); dword3 = is_wild | spec->fsfs_dmaq_id << 1; break; } #endif /* EFSYS_OPT_SIENA */ default: EFSYS_ASSERT(B_FALSE); return (0); } key = spec->fsfs_dword[0] ^ spec->fsfs_dword[1] ^ spec->fsfs_dword[2] ^ dword3; return (key); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_push_entry( __inout efx_nic_t *enp, __in falconsiena_filter_type_t type, __in int index, __in efx_oword_t *eop) { - int rc; + efx_rc_t rc; switch (type) { case EFX_FS_FILTER_RX_TCP_FULL: case EFX_FS_FILTER_RX_TCP_WILD: case EFX_FS_FILTER_RX_UDP_FULL: case EFX_FS_FILTER_RX_UDP_WILD: EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index, eop, B_TRUE); break; #if EFSYS_OPT_SIENA case EFX_FS_FILTER_RX_MAC_FULL: case EFX_FS_FILTER_RX_MAC_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index, eop, B_TRUE); break; case EFX_FS_FILTER_TX_TCP_FULL: case EFX_FS_FILTER_TX_TCP_WILD: case EFX_FS_FILTER_TX_UDP_FULL: case EFX_FS_FILTER_TX_UDP_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index, eop, B_TRUE); break; case EFX_FS_FILTER_TX_MAC_FULL: case EFX_FS_FILTER_TX_MAC_WILD: EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index, eop, B_TRUE); break; #endif /* EFSYS_OPT_SIENA */ default: EFSYS_ASSERT(B_FALSE); rc = ENOTSUP; goto fail1; } return (0); fail1: return (rc); } static __checkReturn boolean_t falconsiena_filter_equal( __in const falconsiena_filter_spec_t *left, __in const falconsiena_filter_spec_t *right) { falconsiena_filter_tbl_id_t tbl_id; tbl_id = falconsiena_filter_tbl_id(left->fsfs_type); if (left->fsfs_type != right->fsfs_type) return (B_FALSE); if (memcmp(left->fsfs_dword, right->fsfs_dword, sizeof (left->fsfs_dword))) return (B_FALSE); if ((tbl_id == EFX_FS_FILTER_TBL_TX_IP || tbl_id == EFX_FS_FILTER_TBL_TX_MAC) && left->fsfs_dmaq_id != right->fsfs_dmaq_id) return (B_FALSE); return (B_TRUE); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_search( __in falconsiena_filter_tbl_t *fsftp, __in falconsiena_filter_spec_t *spec, __in uint32_t key, __in boolean_t for_insert, __out int *filter_index, __out unsigned int 
*depth_required) { unsigned hash, incr, filter_idx, depth; hash = falconsiena_filter_tbl_hash(key); incr = falconsiena_filter_tbl_increment(key); filter_idx = hash & (fsftp->fsft_size - 1); depth = 1; for (;;) { /* * Return success if entry is used and matches this spec * or entry is unused and we are trying to insert. */ if (falconsiena_filter_test_used(fsftp, filter_idx) ? falconsiena_filter_equal(spec, &fsftp->fsft_spec[filter_idx]) : for_insert) { *filter_index = filter_idx; *depth_required = depth; return (0); } /* Return failure if we reached the maximum search depth */ if (depth == FILTER_CTL_SRCH_MAX) return (for_insert ? EBUSY : ENOENT); filter_idx = (filter_idx + incr) & (fsftp->fsft_size - 1); ++depth; } } static void falconsiena_filter_clear_entry( __in efx_nic_t *enp, __in falconsiena_filter_tbl_t *fsftp, __in int index) { efx_oword_t filter; if (falconsiena_filter_test_used(fsftp, index)) { falconsiena_filter_clear_used(fsftp, index); EFX_ZERO_OWORD(filter); falconsiena_filter_push_entry(enp, fsftp->fsft_spec[index].fsfs_type, index, &filter); memset(&fsftp->fsft_spec[index], 0, sizeof (fsftp->fsft_spec[0])); } } void falconsiena_filter_tbl_clear( __in efx_nic_t *enp, __in falconsiena_filter_tbl_id_t tbl_id) { falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter; falconsiena_filter_tbl_t *fsftp = &fsfp->fsf_tbl[tbl_id]; int index; int state; EFSYS_LOCK(enp->en_eslp, state); for (index = 0; index < fsftp->fsft_size; ++index) { falconsiena_filter_clear_entry(enp, fsftp, index); } if (fsftp->fsft_used == 0) falconsiena_filter_reset_search_depth(fsfp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_init( __in efx_nic_t *enp) { falconsiena_filter_t *fsfp; falconsiena_filter_tbl_t *fsftp; int tbl_id; - int rc; + efx_rc_t rc; EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (falconsiena_filter_t), fsfp); if (!fsfp) { rc = ENOMEM; goto fail1; } enp->en_filter.ef_falconsiena_filter = fsfp; switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_IP]; fsftp->fsft_size = FR_AZ_RX_FILTER_TBL0_ROWS; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_IP]; fsftp->fsft_size = FR_AZ_RX_FILTER_TBL0_ROWS; fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_MAC]; fsftp->fsft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_IP]; fsftp->fsft_size = FR_CZ_TX_FILTER_TBL0_ROWS; fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_MAC]; fsftp->fsft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; break; #endif /* EFSYS_OPT_SIENA */ default: rc = ENOTSUP; goto fail2; } for (tbl_id = 0; tbl_id < EFX_FS_FILTER_NTBLS; tbl_id++) { unsigned int bitmap_size; fsftp = &fsfp->fsf_tbl[tbl_id]; if (fsftp->fsft_size == 0) continue; EFX_STATIC_ASSERT(sizeof (fsftp->fsft_bitmap[0]) == sizeof (uint32_t)); bitmap_size = (fsftp->fsft_size + (sizeof (uint32_t) * 8) - 1) / 8; EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, fsftp->fsft_bitmap); if (!fsftp->fsft_bitmap) { rc = ENOMEM; goto fail3; } EFSYS_KMEM_ALLOC(enp->en_esip, fsftp->fsft_size * sizeof (*fsftp->fsft_spec), fsftp->fsft_spec); if (!fsftp->fsft_spec) { rc = ENOMEM; goto fail4; } memset(fsftp->fsft_spec, 0, fsftp->fsft_size * sizeof (*fsftp->fsft_spec)); } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); falconsiena_filter_fini(enp); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); 
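[Editorial sketch, not part of the patch.] falconsiena_filter_search() above bounds the linear probe at FILTER_CTL_SRCH_MAX hops and reports the depth at which it stopped; the push_rx_limits()/push_tx_limits() routines then program the hardware with the per-type maximum depth plus a fudge factor (1 for full-match tables, 3 for wildcard) to allow for the pipelined lookup. The fragment below sketches that relationship for the insert case only, using a toy occupancy array in place of the real fsft_bitmap/fsft_spec pair; names and sizes are illustrative.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define SRCH_MAX	200	/* mirrors FILTER_CTL_SRCH_MAX */
#define SRCH_FUDGE_FULL	1	/* mirrors FILTER_CTL_SRCH_FUDGE_FULL */
#define SRCH_FUDGE_WILD	3	/* mirrors FILTER_CTL_SRCH_FUDGE_WILD */
#define TBL_SIZE	1024u	/* illustrative power-of-two table size */

/* Toy occupancy table standing in for the used-entry bitmap. */
static bool tbl_used[TBL_SIZE];

/*
 * Hop-limited open-addressed search, shaped like falconsiena_filter_search()
 * when inserting: probe from the hashed start index in steps of an odd
 * increment, succeed on the first free slot, fail with EBUSY after SRCH_MAX
 * probes.  *depthp reports how deep the successful probe went.
 */
static int
search_for_insert(unsigned int start, unsigned int incr,
    unsigned int *indexp, unsigned int *depthp)
{
	unsigned int idx = start & (TBL_SIZE - 1);
	unsigned int depth;

	for (depth = 1; depth <= SRCH_MAX; depth++) {
		if (!tbl_used[idx]) {
			*indexp = idx;
			*depthp = depth;
			return (0);
		}
		idx = (idx + incr) & (TBL_SIZE - 1);
	}
	return (EBUSY);
}

int
main(void)
{
	unsigned int idx, depth, max_depth = 0;

	/* Occupy two slots so the insert has to probe past them. */
	tbl_used[10] = tbl_used[11] = true;

	if (search_for_insert(10, 1, &idx, &depth) == 0) {
		tbl_used[idx] = true;
		if (depth > max_depth)
			max_depth = depth;
	}

	/* What push_rx_limits()-style code would program for a full match. */
	printf("max depth %u -> programmed search limit %u\n",
	    max_depth, max_depth + SRCH_FUDGE_FULL);
	return (0);
}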
return (rc); } static void falconsiena_filter_fini( __in efx_nic_t *enp) { falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter; falconsiena_filter_tbl_id_t tbl_id; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); if (fsfp == NULL) return; for (tbl_id = 0; tbl_id < EFX_FS_FILTER_NTBLS; tbl_id++) { falconsiena_filter_tbl_t *fsftp = &fsfp->fsf_tbl[tbl_id]; unsigned int bitmap_size; EFX_STATIC_ASSERT(sizeof (fsftp->fsft_bitmap[0]) == sizeof (uint32_t)); bitmap_size = (fsftp->fsft_size + (sizeof (uint32_t) * 8) - 1) / 8; if (fsftp->fsft_bitmap != NULL) { EFSYS_KMEM_FREE(enp->en_esip, bitmap_size, fsftp->fsft_bitmap); fsftp->fsft_bitmap = NULL; } if (fsftp->fsft_spec != NULL) { EFSYS_KMEM_FREE(enp->en_esip, fsftp->fsft_size * sizeof (*fsftp->fsft_spec), fsftp->fsft_spec); fsftp->fsft_spec = NULL; } } EFSYS_KMEM_FREE(enp->en_esip, sizeof (falconsiena_filter_t), enp->en_filter.ef_falconsiena_filter); } /* Restore filter state after a reset */ -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_restore( __in efx_nic_t *enp) { falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter; falconsiena_filter_tbl_id_t tbl_id; falconsiena_filter_tbl_t *fsftp; falconsiena_filter_spec_t *spec; efx_oword_t filter; int filter_idx; int state; - int rc; + efx_rc_t rc; EFSYS_LOCK(enp->en_eslp, state); for (tbl_id = 0; tbl_id < EFX_FS_FILTER_NTBLS; tbl_id++) { fsftp = &fsfp->fsf_tbl[tbl_id]; for (filter_idx = 0; filter_idx < fsftp->fsft_size; filter_idx++) { if (!falconsiena_filter_test_used(fsftp, filter_idx)) continue; spec = &fsftp->fsft_spec[filter_idx]; if ((rc = falconsiena_filter_build(&filter, spec)) != 0) goto fail1; if ((rc = falconsiena_filter_push_entry(enp, spec->fsfs_type, filter_idx, &filter)) != 0) goto fail2; } } falconsiena_filter_push_rx_limits(enp); falconsiena_filter_push_tx_limits(enp); EFSYS_UNLOCK(enp->en_eslp, state); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace) { - int rc; + efx_rc_t rc; falconsiena_filter_spec_t fs_spec; falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter; falconsiena_filter_tbl_id_t tbl_id; falconsiena_filter_tbl_t *fsftp; falconsiena_filter_spec_t *saved_fs_spec; efx_oword_t filter; int filter_idx; unsigned int depth; int state; uint32_t key; EFSYS_ASSERT3P(spec, !=, NULL); if ((rc = falconsiena_filter_spec_from_gen_spec(&fs_spec, spec)) != 0) goto fail1; tbl_id = falconsiena_filter_tbl_id(fs_spec.fsfs_type); fsftp = &fsfp->fsf_tbl[tbl_id]; if (fsftp->fsft_size == 0) { rc = EINVAL; goto fail2; } key = falconsiena_filter_build(&filter, &fs_spec); EFSYS_LOCK(enp->en_eslp, state); rc = falconsiena_filter_search(fsftp, &fs_spec, key, B_TRUE, &filter_idx, &depth); if (rc != 0) goto fail3; EFSYS_ASSERT3U(filter_idx, <, fsftp->fsft_size); saved_fs_spec = &fsftp->fsft_spec[filter_idx]; if (falconsiena_filter_test_used(fsftp, filter_idx)) { if (may_replace == B_FALSE) { rc = EEXIST; goto fail4; } } falconsiena_filter_set_used(fsftp, filter_idx); *saved_fs_spec = fs_spec; if (fsfp->fsf_depth[fs_spec.fsfs_type] < depth) { fsfp->fsf_depth[fs_spec.fsfs_type] = depth; if (tbl_id == EFX_FS_FILTER_TBL_TX_IP || tbl_id == EFX_FS_FILTER_TBL_TX_MAC) falconsiena_filter_push_tx_limits(enp); else 
falconsiena_filter_push_rx_limits(enp); } falconsiena_filter_push_entry(enp, fs_spec.fsfs_type, filter_idx, &filter); EFSYS_UNLOCK(enp->en_eslp, state); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { - int rc; + efx_rc_t rc; falconsiena_filter_spec_t fs_spec; falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter; falconsiena_filter_tbl_id_t tbl_id; falconsiena_filter_tbl_t *fsftp; falconsiena_filter_spec_t *saved_spec; efx_oword_t filter; int filter_idx; unsigned int depth; int state; uint32_t key; EFSYS_ASSERT3P(spec, !=, NULL); if ((rc = falconsiena_filter_spec_from_gen_spec(&fs_spec, spec)) != 0) goto fail1; tbl_id = falconsiena_filter_tbl_id(fs_spec.fsfs_type); fsftp = &fsfp->fsf_tbl[tbl_id]; key = falconsiena_filter_build(&filter, &fs_spec); EFSYS_LOCK(enp->en_eslp, state); rc = falconsiena_filter_search(fsftp, &fs_spec, key, B_FALSE, &filter_idx, &depth); if (rc != 0) goto fail2; saved_spec = &fsftp->fsft_spec[filter_idx]; falconsiena_filter_clear_entry(enp, fsftp, filter_idx); if (fsftp->fsft_used == 0) falconsiena_filter_reset_search_depth(fsfp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); return (0); fail2: EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #define MAX_SUPPORTED 4 -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length) { int index = 0; uint32_t rx_matches[MAX_SUPPORTED]; - int rc; + efx_rc_t rc; if (list == NULL) { rc = EINVAL; goto fail1; } rx_matches[index++] = EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; rx_matches[index++] = EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; if (enp->en_features & EFX_FEATURE_MAC_HEADER_FILTERS) { rx_matches[index++] = EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC; rx_matches[index++] = EFX_FILTER_MATCH_LOC_MAC; } EFSYS_ASSERT3U(index, <=, MAX_SUPPORTED); *length = index; memcpy(list, rx_matches, *length); return (0); fail1: return (rc); } #undef MAX_SUPPORTED #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #endif /* EFSYS_OPT_FILTER */ Index: stable/10/sys/dev/sfxge/common/efx_impl.h =================================================================== --- stable/10/sys/dev/sfxge/common/efx_impl.h (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_impl.h (revision 293927) @@ -1,1166 +1,1170 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_EFX_IMPL_H #define _SYS_EFX_IMPL_H #include "efsys.h" #include "efx.h" #include "efx_regs.h" #include "efx_regs_ef10.h" /* FIXME: Add definition for driver generated software events */ #ifndef ESE_DZ_EV_CODE_DRV_GEN_EV #define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV #endif #include "efx_check.h" #if EFSYS_OPT_FALCON #include "falcon_impl.h" #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA #include "siena_impl.h" #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON #include "hunt_impl.h" #endif /* EFSYS_OPT_HUNTINGTON */ #ifdef __cplusplus extern "C" { #endif #define EFX_MOD_MCDI 0x00000001 #define EFX_MOD_PROBE 0x00000002 #define EFX_MOD_NVRAM 0x00000004 #define EFX_MOD_VPD 0x00000008 #define EFX_MOD_NIC 0x00000010 #define EFX_MOD_INTR 0x00000020 #define EFX_MOD_EV 0x00000040 #define EFX_MOD_RX 0x00000080 #define EFX_MOD_TX 0x00000100 #define EFX_MOD_PORT 0x00000200 #define EFX_MOD_MON 0x00000400 #define EFX_MOD_WOL 0x00000800 #define EFX_MOD_FILTER 0x00001000 #define EFX_MOD_PKTFILTER 0x00002000 #define EFX_RESET_MAC 0x00000001 #define EFX_RESET_PHY 0x00000002 #define EFX_RESET_RXQ_ERR 0x00000004 #define EFX_RESET_TXQ_ERR 0x00000008 typedef enum efx_mac_type_e { EFX_MAC_INVALID = 0, EFX_MAC_FALCON_GMAC, EFX_MAC_FALCON_XMAC, EFX_MAC_SIENA, EFX_MAC_HUNTINGTON, EFX_MAC_NTYPES } efx_mac_type_t; typedef struct efx_ev_ops_s { - int (*eevo_init)(efx_nic_t *); + efx_rc_t (*eevo_init)(efx_nic_t *); void (*eevo_fini)(efx_nic_t *); - int (*eevo_qcreate)(efx_nic_t *, unsigned int, + efx_rc_t (*eevo_qcreate)(efx_nic_t *, unsigned int, efsys_mem_t *, size_t, uint32_t, efx_evq_t *); void (*eevo_qdestroy)(efx_evq_t *); - int (*eevo_qprime)(efx_evq_t *, unsigned int); + efx_rc_t (*eevo_qprime)(efx_evq_t *, unsigned int); void (*eevo_qpost)(efx_evq_t *, uint16_t); - int (*eevo_qmoderate)(efx_evq_t *, unsigned int); + efx_rc_t (*eevo_qmoderate)(efx_evq_t *, unsigned int); #if EFSYS_OPT_QSTATS void (*eevo_qstats_update)(efx_evq_t *, efsys_stat_t *); #endif } efx_ev_ops_t; typedef struct efx_tx_ops_s { - int (*etxo_init)(efx_nic_t *); + efx_rc_t (*etxo_init)(efx_nic_t *); void (*etxo_fini)(efx_nic_t *); - int (*etxo_qcreate)(efx_nic_t *, + efx_rc_t (*etxo_qcreate)(efx_nic_t *, unsigned int, unsigned int, efsys_mem_t *, size_t, uint32_t, uint16_t, efx_evq_t *, efx_txq_t *, unsigned int *); void (*etxo_qdestroy)(efx_txq_t *); - int (*etxo_qpost)(efx_txq_t *, efx_buffer_t *, + efx_rc_t (*etxo_qpost)(efx_txq_t *, efx_buffer_t *, unsigned int, unsigned int, unsigned int *); void 
(*etxo_qpush)(efx_txq_t *, unsigned int, unsigned int); - int (*etxo_qpace)(efx_txq_t *, unsigned int); - int (*etxo_qflush)(efx_txq_t *); + efx_rc_t (*etxo_qpace)(efx_txq_t *, unsigned int); + efx_rc_t (*etxo_qflush)(efx_txq_t *); void (*etxo_qenable)(efx_txq_t *); - int (*etxo_qpio_enable)(efx_txq_t *); + efx_rc_t (*etxo_qpio_enable)(efx_txq_t *); void (*etxo_qpio_disable)(efx_txq_t *); - int (*etxo_qpio_write)(efx_txq_t *,uint8_t *, size_t, + efx_rc_t (*etxo_qpio_write)(efx_txq_t *,uint8_t *, size_t, size_t); - int (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int, + efx_rc_t (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int, unsigned int *); - int (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *, + efx_rc_t (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *, unsigned int, unsigned int, unsigned int *); void (*etxo_qdesc_dma_create)(efx_txq_t *, efsys_dma_addr_t, size_t, boolean_t, efx_desc_t *); void (*etxo_qdesc_tso_create)(efx_txq_t *, uint16_t, uint32_t, uint8_t, efx_desc_t *); void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t, efx_desc_t *); #if EFSYS_OPT_QSTATS void (*etxo_qstats_update)(efx_txq_t *, efsys_stat_t *); #endif } efx_tx_ops_t; typedef struct efx_rx_ops_s { - int (*erxo_init)(efx_nic_t *); + efx_rc_t (*erxo_init)(efx_nic_t *); void (*erxo_fini)(efx_nic_t *); #if EFSYS_OPT_RX_HDR_SPLIT - int (*erxo_hdr_split_enable)(efx_nic_t *, unsigned int, + efx_rc_t (*erxo_hdr_split_enable)(efx_nic_t *, unsigned int, unsigned int); #endif #if EFSYS_OPT_RX_SCATTER - int (*erxo_scatter_enable)(efx_nic_t *, unsigned int); + efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int); #endif #if EFSYS_OPT_RX_SCALE - int (*erxo_scale_mode_set)(efx_nic_t *, efx_rx_hash_alg_t, + efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, efx_rx_hash_alg_t, efx_rx_hash_type_t, boolean_t); - int (*erxo_scale_key_set)(efx_nic_t *, uint8_t *, size_t); - int (*erxo_scale_tbl_set)(efx_nic_t *, unsigned int *, + efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint8_t *, size_t); + efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, unsigned int *, size_t); #endif void (*erxo_qpost)(efx_rxq_t *, efsys_dma_addr_t *, size_t, unsigned int, unsigned int, unsigned int); void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *); - int (*erxo_qflush)(efx_rxq_t *); + efx_rc_t (*erxo_qflush)(efx_rxq_t *); void (*erxo_qenable)(efx_rxq_t *); - int (*erxo_qcreate)(efx_nic_t *enp, unsigned int, + efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int, unsigned int, efx_rxq_type_t, efsys_mem_t *, size_t, uint32_t, efx_evq_t *, efx_rxq_t *); void (*erxo_qdestroy)(efx_rxq_t *); } efx_rx_ops_t; typedef struct efx_mac_ops_s { - int (*emo_reset)(efx_nic_t *); /* optional */ - int (*emo_poll)(efx_nic_t *, efx_link_mode_t *); - int (*emo_up)(efx_nic_t *, boolean_t *); - int (*emo_addr_set)(efx_nic_t *); - int (*emo_reconfigure)(efx_nic_t *); - int (*emo_multicast_list_set)(efx_nic_t *); - int (*emo_filter_default_rxq_set)(efx_nic_t *, + efx_rc_t (*emo_reset)(efx_nic_t *); /* optional */ + efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *); + efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *); + efx_rc_t (*emo_addr_set)(efx_nic_t *); + efx_rc_t (*emo_reconfigure)(efx_nic_t *); + efx_rc_t (*emo_multicast_list_set)(efx_nic_t *); + efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *, efx_rxq_t *, boolean_t); void (*emo_filter_default_rxq_clear)(efx_nic_t *); #if EFSYS_OPT_LOOPBACK - int (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t, + efx_rc_t (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t, efx_loopback_type_t); #endif /* EFSYS_OPT_LOOPBACK */ 
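[Editorial sketch, not part of the patch.] The ops structures declared in this header (efx_ev_ops_t, efx_tx_ops_t, efx_rx_ops_t, efx_mac_ops_t, ...) all follow the pattern that efx_filter_init() shows earlier in this revision: a switch on the NIC family selects a static ops table, and subsequent calls dispatch through its function pointers. A compact standalone sketch of that dispatch follows; example_family_t, example_ops_t and siena_do_init() are invented names used purely for illustration, and efx_rc_t is typedef'd locally only so the fragment stands alone.

#include <errno.h>
#include <stdio.h>

typedef int efx_rc_t;		/* stand-in for the common-code typedef */

/* Invented family enum and ops table, shaped like the efx_*_ops_t types. */
typedef enum { FAMILY_FALCON, FAMILY_SIENA, FAMILY_UNKNOWN } example_family_t;

typedef struct example_ops_s {
	efx_rc_t	(*eo_init)(void);
	void		(*eo_fini)(void);
} example_ops_t;

static efx_rc_t siena_do_init(void) { printf("siena init\n"); return (0); }
static void siena_do_fini(void) { printf("siena fini\n"); }

static const example_ops_t siena_ops = {
	siena_do_init,		/* eo_init */
	siena_do_fini,		/* eo_fini */
};

/* Family switch picks the ops table, mirroring efx_filter_init(). */
static efx_rc_t
example_init(example_family_t family, const example_ops_t **opspp)
{
	const example_ops_t *eop;
	efx_rc_t rc;

	switch (family) {
	case FAMILY_SIENA:
		eop = &siena_ops;
		break;
	default:
		rc = ENOTSUP;
		goto fail1;
	}

	if ((rc = eop->eo_init()) != 0)
		goto fail2;

	*opspp = eop;
	return (0);

fail2:
fail1:
	return (rc);
}

int
main(void)
{
	const example_ops_t *eop;

	if (example_init(FAMILY_SIENA, &eop) == 0)
		eop->eo_fini();
	return (0);
}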
#if EFSYS_OPT_MAC_STATS - int (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *); - int (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *, + efx_rc_t (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *); + efx_rc_t (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *, uint16_t, boolean_t); - int (*emo_stats_update)(efx_nic_t *, efsys_mem_t *, + efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *, efsys_stat_t *, uint32_t *); #endif /* EFSYS_OPT_MAC_STATS */ } efx_mac_ops_t; typedef struct efx_phy_ops_s { - int (*epo_power)(efx_nic_t *, boolean_t); /* optional */ - int (*epo_reset)(efx_nic_t *); - int (*epo_reconfigure)(efx_nic_t *); - int (*epo_verify)(efx_nic_t *); - int (*epo_uplink_check)(efx_nic_t *, + efx_rc_t (*epo_power)(efx_nic_t *, boolean_t); /* optional */ + efx_rc_t (*epo_reset)(efx_nic_t *); + efx_rc_t (*epo_reconfigure)(efx_nic_t *); + efx_rc_t (*epo_verify)(efx_nic_t *); + efx_rc_t (*epo_uplink_check)(efx_nic_t *, boolean_t *); /* optional */ - int (*epo_downlink_check)(efx_nic_t *, efx_link_mode_t *, + efx_rc_t (*epo_downlink_check)(efx_nic_t *, efx_link_mode_t *, unsigned int *, uint32_t *); - int (*epo_oui_get)(efx_nic_t *, uint32_t *); + efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *); #if EFSYS_OPT_PHY_STATS - int (*epo_stats_update)(efx_nic_t *, efsys_mem_t *, + efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *, uint32_t *); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES const char *(*epo_prop_name)(efx_nic_t *, unsigned int); #endif /* EFSYS_OPT_PHY_PROPS */ - int (*epo_prop_get)(efx_nic_t *, unsigned int, uint32_t, + efx_rc_t (*epo_prop_get)(efx_nic_t *, unsigned int, uint32_t, uint32_t *); - int (*epo_prop_set)(efx_nic_t *, unsigned int, uint32_t); + efx_rc_t (*epo_prop_set)(efx_nic_t *, unsigned int, uint32_t); #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST - int (*epo_bist_enable_offline)(efx_nic_t *); - int (*epo_bist_start)(efx_nic_t *, efx_bist_type_t); - int (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t, + efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *); + efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t); + efx_rc_t (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t, efx_bist_result_t *, uint32_t *, unsigned long *, size_t); void (*epo_bist_stop)(efx_nic_t *, efx_bist_type_t); #endif /* EFSYS_OPT_BIST */ } efx_phy_ops_t; #if EFSYS_OPT_FILTER typedef struct efx_filter_ops_s { - int (*efo_init)(efx_nic_t *); - void (*efo_fini)(efx_nic_t *); - int (*efo_restore)(efx_nic_t *); - int (*efo_add)(efx_nic_t *, efx_filter_spec_t *, - boolean_t may_replace); - int (*efo_delete)(efx_nic_t *, efx_filter_spec_t *); - int (*efo_supported_filters)(efx_nic_t *, uint32_t *, size_t *); - int (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t, + efx_rc_t (*efo_init)(efx_nic_t *); + void (*efo_fini)(efx_nic_t *); + efx_rc_t (*efo_restore)(efx_nic_t *); + efx_rc_t (*efo_add)(efx_nic_t *, efx_filter_spec_t *, + boolean_t may_replace); + efx_rc_t (*efo_delete)(efx_nic_t *, efx_filter_spec_t *); + efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *, size_t *); + efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t, boolean_t, boolean_t, boolean_t, uint8_t const *, int); } efx_filter_ops_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in int count); #endif /* EFSYS_OPT_FILTER */ 
typedef struct efx_pktfilter_ops_s { - int (*epfo_set)(efx_nic_t *, + efx_rc_t (*epfo_set)(efx_nic_t *, boolean_t unicst, boolean_t brdcast); #if EFSYS_OPT_MCAST_FILTER_LIST - int (*epfo_mcast_list_set)(efx_nic_t *, uint8_t const *addrs, int count); + efx_rc_t (*epfo_mcast_list_set)(efx_nic_t *, + uint8_t const *addrs, int count); #endif /* EFSYS_OPT_MCAST_FILTER_LIST */ - int (*epfo_mcast_all)(efx_nic_t *); + efx_rc_t (*epfo_mcast_all)(efx_nic_t *); } efx_pktfilter_ops_t; typedef struct efx_port_s { efx_mac_type_t ep_mac_type; uint32_t ep_phy_type; uint8_t ep_port; uint32_t ep_mac_pdu; uint8_t ep_mac_addr[6]; efx_link_mode_t ep_link_mode; boolean_t ep_all_unicst; boolean_t ep_mulcst; boolean_t ep_all_mulcst; boolean_t ep_brdcst; unsigned int ep_fcntl; boolean_t ep_fcntl_autoneg; efx_oword_t ep_multicst_hash[2]; uint8_t ep_mulcst_addr_list[EFX_MAC_ADDR_LEN * EFX_MAC_MULTICAST_LIST_MAX]; uint32_t ep_mulcst_addr_count; #if EFSYS_OPT_LOOPBACK efx_loopback_type_t ep_loopback_type; efx_link_mode_t ep_loopback_link_mode; #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_PHY_FLAGS uint32_t ep_phy_flags; #endif /* EFSYS_OPT_PHY_FLAGS */ #if EFSYS_OPT_PHY_LED_CONTROL efx_phy_led_mode_t ep_phy_led_mode; #endif /* EFSYS_OPT_PHY_LED_CONTROL */ efx_phy_media_type_t ep_fixed_port_type; efx_phy_media_type_t ep_module_type; uint32_t ep_adv_cap_mask; uint32_t ep_lp_cap_mask; uint32_t ep_default_adv_cap_mask; uint32_t ep_phy_cap_mask; #if EFSYS_OPT_PHY_TXC43128 || EFSYS_OPT_PHY_QT2025C union { struct { unsigned int bug10934_count; } ep_txc43128; struct { unsigned int bug17190_count; } ep_qt2025c; }; #endif boolean_t ep_mac_poll_needed; /* falcon only */ boolean_t ep_mac_up; /* falcon only */ uint32_t ep_fwver; /* falcon only */ boolean_t ep_mac_drain; boolean_t ep_mac_stats_pending; #if EFSYS_OPT_BIST efx_bist_type_t ep_current_bist; #endif efx_mac_ops_t *ep_emop; efx_phy_ops_t *ep_epop; } efx_port_t; typedef struct efx_mon_ops_s { - int (*emo_reset)(efx_nic_t *); - int (*emo_reconfigure)(efx_nic_t *); + efx_rc_t (*emo_reset)(efx_nic_t *); + efx_rc_t (*emo_reconfigure)(efx_nic_t *); #if EFSYS_OPT_MON_STATS - int (*emo_stats_update)(efx_nic_t *, efsys_mem_t *, - efx_mon_stat_value_t *); + efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *, + efx_mon_stat_value_t *); #endif /* EFSYS_OPT_MON_STATS */ } efx_mon_ops_t; typedef struct efx_mon_s { efx_mon_type_t em_type; efx_mon_ops_t *em_emop; } efx_mon_t; typedef struct efx_intr_ops_s { - int (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *); + efx_rc_t (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *); void (*eio_enable)(efx_nic_t *); void (*eio_disable)(efx_nic_t *); void (*eio_disable_unlocked)(efx_nic_t *); - int (*eio_trigger)(efx_nic_t *, unsigned int); + efx_rc_t (*eio_trigger)(efx_nic_t *, unsigned int); void (*eio_fini)(efx_nic_t *); } efx_intr_ops_t; typedef struct efx_intr_s { efx_intr_ops_t *ei_eiop; efsys_mem_t *ei_esmp; efx_intr_type_t ei_type; unsigned int ei_level; } efx_intr_t; typedef struct efx_nic_ops_s { - int (*eno_probe)(efx_nic_t *); - int (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*); - int (*eno_reset)(efx_nic_t *); - int (*eno_init)(efx_nic_t *); - int (*eno_get_vi_pool)(efx_nic_t *, uint32_t *); - int (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t, + efx_rc_t (*eno_probe)(efx_nic_t *); + efx_rc_t (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*); + efx_rc_t (*eno_reset)(efx_nic_t *); + efx_rc_t (*eno_init)(efx_nic_t *); + efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *); + efx_rc_t 
(*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t, uint32_t *, size_t *); #if EFSYS_OPT_DIAG - int (*eno_sram_test)(efx_nic_t *, efx_sram_pattern_fn_t); - int (*eno_register_test)(efx_nic_t *); + efx_rc_t (*eno_sram_test)(efx_nic_t *, efx_sram_pattern_fn_t); + efx_rc_t (*eno_register_test)(efx_nic_t *); #endif /* EFSYS_OPT_DIAG */ - void (*eno_fini)(efx_nic_t *); - void (*eno_unprobe)(efx_nic_t *); + void (*eno_fini)(efx_nic_t *); + void (*eno_unprobe)(efx_nic_t *); } efx_nic_ops_t; #ifndef EFX_TXQ_LIMIT_TARGET #define EFX_TXQ_LIMIT_TARGET 259 #endif #ifndef EFX_RXQ_LIMIT_TARGET #define EFX_RXQ_LIMIT_TARGET 512 #endif #ifndef EFX_TXQ_DC_SIZE #define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */ #endif #ifndef EFX_RXQ_DC_SIZE #define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */ #endif #if EFSYS_OPT_FILTER typedef struct falconsiena_filter_spec_s { uint8_t fsfs_type; uint32_t fsfs_flags; uint32_t fsfs_dmaq_id; uint32_t fsfs_dword[3]; } falconsiena_filter_spec_t; typedef enum falconsiena_filter_type_e { EFX_FS_FILTER_RX_TCP_FULL, /* TCP/IPv4 4-tuple {dIP,dTCP,sIP,sTCP} */ EFX_FS_FILTER_RX_TCP_WILD, /* TCP/IPv4 dest {dIP,dTCP, -, -} */ EFX_FS_FILTER_RX_UDP_FULL, /* UDP/IPv4 4-tuple {dIP,dUDP,sIP,sUDP} */ EFX_FS_FILTER_RX_UDP_WILD, /* UDP/IPv4 dest {dIP,dUDP, -, -} */ #if EFSYS_OPT_SIENA EFX_FS_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */ EFX_FS_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */ EFX_FS_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */ EFX_FS_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */ EFX_FS_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dTCP,sIP,sTCP} */ EFX_FS_FILTER_TX_UDP_WILD, /* UDP/IPv4 source (host, port) */ EFX_FS_FILTER_TX_MAC_FULL, /* Ethernet source (MAC address, VLAN ID) */ EFX_FS_FILTER_TX_MAC_WILD, /* Ethernet source (MAC address) */ #endif /* EFSYS_OPT_SIENA */ EFX_FS_FILTER_NTYPES } falconsiena_filter_type_t; typedef enum falconsiena_filter_tbl_id_e { EFX_FS_FILTER_TBL_RX_IP = 0, EFX_FS_FILTER_TBL_RX_MAC, EFX_FS_FILTER_TBL_TX_IP, EFX_FS_FILTER_TBL_TX_MAC, EFX_FS_FILTER_NTBLS } falconsiena_filter_tbl_id_t; typedef struct falconsiena_filter_tbl_s { int fsft_size; /* number of entries */ int fsft_used; /* active count */ uint32_t *fsft_bitmap; /* active bitmap */ falconsiena_filter_spec_t *fsft_spec; /* array of saved specs */ } falconsiena_filter_tbl_t; typedef struct falconsiena_filter_s { falconsiena_filter_tbl_t fsf_tbl[EFX_FS_FILTER_NTBLS]; unsigned int fsf_depth[EFX_FS_FILTER_NTYPES]; } falconsiena_filter_t; typedef struct efx_filter_s { #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA falconsiena_filter_t *ef_falconsiena_filter; #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON hunt_filter_table_t *ef_hunt_filter_table; #endif /* EFSYS_OPT_HUNTINGTON */ } efx_filter_t; extern void falconsiena_filter_tbl_clear( __in efx_nic_t *enp, __in falconsiena_filter_tbl_id_t tbl); #endif /* EFSYS_OPT_FILTER */ #if EFSYS_OPT_MCDI typedef struct efx_mcdi_ops_s { - int (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *); + efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *); void (*emco_request_copyin)(efx_nic_t *, efx_mcdi_req_t *, unsigned int, boolean_t, boolean_t); boolean_t (*emco_request_poll)(efx_nic_t *); void (*emco_request_copyout)(efx_nic_t *, efx_mcdi_req_t *); - int (*emco_poll_reboot)(efx_nic_t *); + efx_rc_t (*emco_poll_reboot)(efx_nic_t *); void (*emco_fini)(efx_nic_t *); - int (*emco_fw_update_supported)(efx_nic_t *, boolean_t *); - int (*emco_macaddr_change_supported)(efx_nic_t *, boolean_t *); + efx_rc_t 
(*emco_fw_update_supported)(efx_nic_t *, boolean_t *); + efx_rc_t (*emco_macaddr_change_supported)(efx_nic_t *, boolean_t *); } efx_mcdi_ops_t; typedef struct efx_mcdi_s { efx_mcdi_ops_t *em_emcop; const efx_mcdi_transport_t *em_emtp; efx_mcdi_iface_t em_emip; } efx_mcdi_t; #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_NVRAM typedef struct efx_nvram_ops_s { #if EFSYS_OPT_DIAG - int (*envo_test)(efx_nic_t *); + efx_rc_t (*envo_test)(efx_nic_t *); #endif /* EFSYS_OPT_DIAG */ - int (*envo_size)(efx_nic_t *, efx_nvram_type_t, size_t *); - int (*envo_get_version)(efx_nic_t *, efx_nvram_type_t, - uint32_t *, uint16_t *); - int (*envo_rw_start)(efx_nic_t *, efx_nvram_type_t, size_t *); - int (*envo_read_chunk)(efx_nic_t *, efx_nvram_type_t, - unsigned int, caddr_t, size_t); - int (*envo_erase)(efx_nic_t *, efx_nvram_type_t); - int (*envo_write_chunk)(efx_nic_t *, efx_nvram_type_t, - unsigned int, caddr_t, size_t); - void (*envo_rw_finish)(efx_nic_t *, efx_nvram_type_t); - int (*envo_set_version)(efx_nic_t *, efx_nvram_type_t, uint16_t *); + efx_rc_t (*envo_size)(efx_nic_t *, efx_nvram_type_t, size_t *); + efx_rc_t (*envo_get_version)(efx_nic_t *, efx_nvram_type_t, + uint32_t *, uint16_t *); + efx_rc_t (*envo_rw_start)(efx_nic_t *, efx_nvram_type_t, size_t *); + efx_rc_t (*envo_read_chunk)(efx_nic_t *, efx_nvram_type_t, + unsigned int, caddr_t, size_t); + efx_rc_t (*envo_erase)(efx_nic_t *, efx_nvram_type_t); + efx_rc_t (*envo_write_chunk)(efx_nic_t *, efx_nvram_type_t, + unsigned int, caddr_t, size_t); + void (*envo_rw_finish)(efx_nic_t *, efx_nvram_type_t); + efx_rc_t (*envo_set_version)(efx_nic_t *, efx_nvram_type_t, + uint16_t *); } efx_nvram_ops_t; #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_VPD typedef struct efx_vpd_ops_s { - int (*evpdo_init)(efx_nic_t *); - int (*evpdo_size)(efx_nic_t *, size_t *); - int (*evpdo_read)(efx_nic_t *, caddr_t, size_t); - int (*evpdo_verify)(efx_nic_t *, caddr_t, size_t); - int (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t); - int (*evpdo_get)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *); - int (*evpdo_set)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *); - int (*evpdo_next)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *, - unsigned int *); - int (*evpdo_write)(efx_nic_t *, caddr_t, size_t); - void (*evpdo_fini)(efx_nic_t *); + efx_rc_t (*evpdo_init)(efx_nic_t *); + efx_rc_t (*evpdo_size)(efx_nic_t *, size_t *); + efx_rc_t (*evpdo_read)(efx_nic_t *, caddr_t, size_t); + efx_rc_t (*evpdo_verify)(efx_nic_t *, caddr_t, size_t); + efx_rc_t (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t); + efx_rc_t (*evpdo_get)(efx_nic_t *, caddr_t, size_t, + efx_vpd_value_t *); + efx_rc_t (*evpdo_set)(efx_nic_t *, caddr_t, size_t, + efx_vpd_value_t *); + efx_rc_t (*evpdo_next)(efx_nic_t *, caddr_t, size_t, + efx_vpd_value_t *, unsigned int *); + efx_rc_t (*evpdo_write)(efx_nic_t *, caddr_t, size_t); + void (*evpdo_fini)(efx_nic_t *); } efx_vpd_ops_t; #endif /* EFSYS_OPT_VPD */ #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_partitions( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size, __out unsigned int *npartnp); - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_metadata( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4], __out_bcount_opt(size) char *descp, __in size_t size); - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_info( __in efx_nic_t *enp, __in uint32_t partn, __out_opt size_t *sizep, __out_opt uint32_t *addressp, __out_opt 
uint32_t *erase_sizep); - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_update_start( __in efx_nic_t *enp, __in uint32_t partn); - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_read( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __out_bcount(size) caddr_t data, __in size_t size); - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_erase( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __in size_t size); - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_write( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __out_bcount(size) caddr_t data, __in size_t size); - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_update_finish( __in efx_nic_t *enp, __in uint32_t partn, __in boolean_t reboot); #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_test( __in efx_nic_t *enp, __in uint32_t partn); #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ typedef struct efx_drv_cfg_s { uint32_t edc_min_vi_count; uint32_t edc_max_vi_count; uint32_t edc_max_piobuf_count; uint32_t edc_pio_alloc_size; } efx_drv_cfg_t; struct efx_nic_s { uint32_t en_magic; efx_family_t en_family; uint32_t en_features; efsys_identifier_t *en_esip; efsys_lock_t *en_eslp; efsys_bar_t *en_esbp; unsigned int en_mod_flags; unsigned int en_reset_flags; efx_nic_cfg_t en_nic_cfg; efx_drv_cfg_t en_drv_cfg; efx_port_t en_port; efx_mon_t en_mon; efx_intr_t en_intr; uint32_t en_ev_qcount; uint32_t en_rx_qcount; uint32_t en_tx_qcount; efx_nic_ops_t *en_enop; efx_ev_ops_t *en_eevop; efx_tx_ops_t *en_etxop; efx_rx_ops_t *en_erxop; #if EFSYS_OPT_FILTER efx_filter_t en_filter; efx_filter_ops_t *en_efop; #endif /* EFSYS_OPT_FILTER */ efx_pktfilter_ops_t *en_epfop; #if EFSYS_OPT_MCDI efx_mcdi_t en_mcdi; #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_NVRAM efx_nvram_type_t en_nvram_locked; efx_nvram_ops_t *en_envop; #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_VPD efx_vpd_ops_t *en_evpdop; #endif /* EFSYS_OPT_VPD */ #if EFSYS_OPT_RX_SCALE efx_rx_hash_support_t en_hash_support; efx_rx_scale_support_t en_rss_support; uint32_t en_rss_context; #endif /* EFSYS_OPT_RX_SCALE */ uint32_t en_vport_id; union { #if EFSYS_OPT_FALCON struct { falcon_spi_dev_t enu_fsd[FALCON_SPI_NTYPES]; falcon_i2c_t enu_fip; boolean_t enu_i2c_locked; #if EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE const uint8_t *enu_forced_cfg; #endif /* EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE */ uint8_t enu_mon_devid; #if EFSYS_OPT_PCIE_TUNE unsigned int enu_nlanes; #endif /* EFSYS_OPT_PCIE_TUNE */ uint16_t enu_board_rev; boolean_t enu_internal_sram; uint8_t enu_sram_num_bank; uint8_t enu_sram_bank_size; } falcon; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA struct { #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD unsigned int enu_partn_mask; #endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ #if EFSYS_OPT_VPD caddr_t enu_svpd; size_t enu_svpd_length; #endif /* EFSYS_OPT_VPD */ int enu_unused; } siena; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON struct { int enu_vi_base; int enu_vi_count; #if EFSYS_OPT_VPD caddr_t enu_svpd; size_t enu_svpd_length; #endif /* EFSYS_OPT_VPD */ efx_piobuf_handle_t enu_piobuf_handle[HUNT_PIOBUF_NBUFS]; uint32_t enu_piobuf_count; uint32_t enu_pio_alloc_map[HUNT_PIOBUF_NBUFS]; uint32_t enu_pio_write_vi_base; /* Memory BAR mapping regions */ uint32_t enu_uc_mem_map_offset; size_t enu_uc_mem_map_size; uint32_t enu_wc_mem_map_offset; size_t enu_wc_mem_map_size; } hunt; #endif /* EFSYS_OPT_HUNTINGTON */ } en_u; }; #define EFX_NIC_MAGIC 0x02121996 
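
The functions the later hunks touch (efx_intr_init, efx_mac_pdu_set, efx_mcdi_init and the rest) keep the driver's usual error-handling shape while swapping the declared type: a local efx_rc_t rc, one numbered fail label per failure point unwinding in reverse order, and a probe at the outermost label whose type argument changes from int to efx_rc_t together with the declaration. The sketch below is a hypothetical, self-contained illustration of that convention; EXAMPLE_PROBE()/EXAMPLE_PROBE1() merely print and stand in for the platform-provided EFSYS_PROBE()/EFSYS_PROBE1() macros, and example_init() is not a real driver entry point.

#include <errno.h>
#include <stdio.h>

typedef int example_rc_t;		/* stand-in for efx_rc_t */

/* Stand-ins for the platform-provided EFSYS_PROBE()/EFSYS_PROBE1(). */
#define	EXAMPLE_PROBE(_lbl)		printf("probe %s\n", #_lbl)
#define	EXAMPLE_PROBE1(_lbl, _t, _v)	\
	printf("probe %s: " #_t "=%d\n", #_lbl, (int)(_v))

static example_rc_t
example_init(int already_initialised, int family_supported)
{
	example_rc_t rc;

	if (already_initialised) {
		rc = EINVAL;		/* module set up twice */
		goto fail1;
	}
	if (!family_supported) {
		rc = ENOTSUP;		/* unknown controller family */
		goto fail2;
	}
	return (0);

fail2:
	EXAMPLE_PROBE(fail2);
fail1:
	/* The probe's type argument tracks the declaration: efx_rc_t, not int. */
	EXAMPLE_PROBE1(fail1, example_rc_t, rc);
	return (rc);
}

int
main(void)
{
	printf("rc=%d\n", example_init(0, 1));	/* 0: success */
	printf("rc=%d\n", example_init(1, 1));	/* EINVAL via fail1 */
	printf("rc=%d\n", example_init(0, 0));	/* ENOTSUP via fail2 -> fail1 */
	return (0);
}

Because the labels fall through from the deepest failure point down to fail1, each failure gets its own probe site while the return code itself is reported exactly once, which is why only the fail1 probe needed its type argument updated in this revision.
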
typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *, const efx_ev_callbacks_t *, void *); typedef struct efx_evq_rxq_state_s { unsigned int eers_rx_read_ptr; unsigned int eers_rx_mask; } efx_evq_rxq_state_t; struct efx_evq_s { uint32_t ee_magic; efx_nic_t *ee_enp; unsigned int ee_index; unsigned int ee_mask; efsys_mem_t *ee_esmp; #if EFSYS_OPT_QSTATS uint32_t ee_stat[EV_NQSTATS]; #endif /* EFSYS_OPT_QSTATS */ efx_ev_handler_t ee_rx; efx_ev_handler_t ee_tx; efx_ev_handler_t ee_driver; efx_ev_handler_t ee_global; efx_ev_handler_t ee_drv_gen; #if EFSYS_OPT_MCDI efx_ev_handler_t ee_mcdi; #endif /* EFSYS_OPT_MCDI */ efx_evq_rxq_state_t ee_rxq_state[EFX_EV_RX_NLABELS]; }; #define EFX_EVQ_MAGIC 0x08081997 #define EFX_EVQ_FALCON_TIMER_QUANTUM_NS 4968 /* 621 cycles */ #define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */ struct efx_rxq_s { uint32_t er_magic; efx_nic_t *er_enp; efx_evq_t *er_eep; unsigned int er_index; unsigned int er_label; unsigned int er_mask; efsys_mem_t *er_esmp; }; #define EFX_RXQ_MAGIC 0x15022005 struct efx_txq_s { uint32_t et_magic; efx_nic_t *et_enp; unsigned int et_index; unsigned int et_mask; efsys_mem_t *et_esmp; #if EFSYS_OPT_HUNTINGTON uint32_t et_pio_bufnum; uint32_t et_pio_blknum; uint32_t et_pio_write_offset; uint32_t et_pio_offset; size_t et_pio_size; #endif #if EFSYS_OPT_QSTATS uint32_t et_stat[TX_NQSTATS]; #endif /* EFSYS_OPT_QSTATS */ }; #define EFX_TXQ_MAGIC 0x05092005 #define EFX_MAC_ADDR_COPY(_dst, _src) \ do { \ (_dst)[0] = (_src)[0]; \ (_dst)[1] = (_src)[1]; \ (_dst)[2] = (_src)[2]; \ (_dst)[3] = (_src)[3]; \ (_dst)[4] = (_src)[4]; \ (_dst)[5] = (_src)[5]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_MAC_BROADCAST_ADDR_SET(_dst) \ do { \ uint16_t *_d = (uint16_t *)(_dst); \ _d[0] = 0xffff; \ _d[1] = 0xffff; \ _d[2] = 0xffff; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if EFSYS_OPT_CHECK_REG #define EFX_CHECK_REG(_enp, _reg) \ do { \ const char *name = #_reg; \ char min = name[4]; \ char max = name[5]; \ char rev; \ \ switch ((_enp)->en_family) { \ case EFX_FAMILY_FALCON: \ rev = 'B'; \ break; \ \ case EFX_FAMILY_SIENA: \ rev = 'C'; \ break; \ \ case EFX_FAMILY_HUNTINGTON: \ rev = 'D'; \ break; \ \ default: \ rev = '?'; \ break; \ } \ \ EFSYS_ASSERT3S(rev, >=, min); \ EFSYS_ASSERT3S(rev, <=, max); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_CHECK_REG(_enp, _reg) do { \ _NOTE(CONSTANTCONDITION) \ } while(B_FALSE) #endif #define EFX_BAR_READD(_enp, _reg, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \ (_edp), (_lock)); \ EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \ (_edp), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_READQ(_enp, _reg, _eqp) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \ (_eqp)); \ EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \ 
uint32_t, _reg ## _OFST, \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \ (_eqp)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_READO(_enp, _reg, _eop) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \ (_eop), B_TRUE); \ EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_WRITEO(_enp, _reg, _eop) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \ (_eop), B_TRUE); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READD((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ EFSYS_BAR_WRITED((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ EFSYS_BAR_WRITED((_enp)->en_esbp, \ (_reg ## _OFST + \ (2 * sizeof (efx_dword_t)) + \ ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ EFSYS_BAR_WRITED((_enp)->en_esbp, \ (_reg ## _OFST + \ (3 * sizeof (efx_dword_t)) + \ ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READQ((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eqp)); \ EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ EFSYS_BAR_WRITEQ((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eqp)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READO((_enp)->en_esbp, \ 
(_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eop), (_lock)); \ EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ EFSYS_BAR_WRITEO((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eop), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* * Allow drivers to perform optimised 128-bit doorbell writes. * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid * the need for locking in the host, and are the only ones known to be safe to * use 128-bites write with. */ #define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \ const char *, \ #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eop)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_DMA_SYNC_QUEUE_FOR_DEVICE(_esmp, _entries, _wptr, _owptr) \ do { \ unsigned int _new = (_wptr); \ unsigned int _old = (_owptr); \ \ if ((_new) >= (_old)) \ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \ (_old) * sizeof (efx_desc_t), \ ((_new) - (_old)) * sizeof (efx_desc_t)); \ else \ /* \ * It is cheaper to sync entire map than sync \ * two parts especially when offset/size are \ * ignored and entire map is synced in any case.\ */ \ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \ 0, \ (_entries) * sizeof (efx_desc_t)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_biu_test( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mac_select( __in efx_nic_t *enp); extern void efx_mac_multicast_hash_compute( __in_ecount(6*count) uint8_t const *addrs, __in int count, __out efx_oword_t *hash_low, __out efx_oword_t *hash_high); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_phy_probe( __in efx_nic_t *enp); extern void efx_phy_unprobe( __in efx_nic_t *enp); #if EFSYS_OPT_VPD /* VPD utility functions */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_hunk_length( __in_bcount(size) caddr_t data, __in size_t size, __out size_t *lengthp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_hunk_verify( __in_bcount(size) caddr_t data, __in size_t size, __out_opt boolean_t *cksummedp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_hunk_reinit( __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t wantpid); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_hunk_get( __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_tag_t tag, __in efx_vpd_keyword_t keyword, __out unsigned int *payloadp, __out uint8_t *paylenp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_hunk_next( __in_bcount(size) caddr_t 
data, __in size_t size, __out efx_vpd_tag_t *tagp, __out efx_vpd_keyword_t *keyword, __out_bcount_opt(*paylenp) unsigned int *payloadp, __out_opt uint8_t *paylenp, __inout unsigned int *contp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_vpd_hunk_set( __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); #endif /* EFSYS_OPT_VPD */ #if EFSYS_OPT_DIAG extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[]; typedef struct efx_register_set_s { unsigned int address; unsigned int step; unsigned int rows; efx_oword_t mask; } efx_register_set_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_test_registers( __in efx_nic_t *enp, __in efx_register_set_t *rsp, __in size_t count); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_nic_test_tables( __in efx_nic_t *enp, __in efx_register_set_t *rsp, __in efx_pattern_type_t pattern, __in size_t count); #endif /* EFSYS_OPT_DIAG */ #if EFSYS_OPT_MCDI -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_set_workaround( __in efx_nic_t *enp, __in uint32_t type, __in boolean_t enabled, __out_opt uint32_t *flagsp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_get_workarounds( __in efx_nic_t *enp, __out_opt uint32_t *implementedp, __out_opt uint32_t *enabledp); #endif /* EFSYS_OPT_MCDI */ #ifdef __cplusplus } #endif #endif /* _SYS_EFX_IMPL_H */ Index: stable/10/sys/dev/sfxge/common/efx_intr.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_intr.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_intr.c (revision 293927) @@ -1,579 +1,579 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp); static void falconsiena_intr_enable( __in efx_nic_t *enp); static void falconsiena_intr_disable( __in efx_nic_t *enp); static void falconsiena_intr_disable_unlocked( __in efx_nic_t *enp); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_intr_trigger( __in efx_nic_t *enp, __in unsigned int level); static void falconsiena_intr_fini( __in efx_nic_t *enp); static __checkReturn boolean_t falconsiena_intr_check_fatal( __in efx_nic_t *enp); static void falconsiena_intr_fatal( __in efx_nic_t *enp); #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_FALCON static efx_intr_ops_t __efx_intr_falcon_ops = { falconsiena_intr_init, /* eio_init */ falconsiena_intr_enable, /* eio_enable */ falconsiena_intr_disable, /* eio_disable */ falconsiena_intr_disable_unlocked, /* eio_disable_unlocked */ falconsiena_intr_trigger, /* eio_trigger */ falconsiena_intr_fini, /* eio_fini */ }; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA static efx_intr_ops_t __efx_intr_siena_ops = { falconsiena_intr_init, /* eio_init */ falconsiena_intr_enable, /* eio_enable */ falconsiena_intr_disable, /* eio_disable */ falconsiena_intr_disable_unlocked, /* eio_disable_unlocked */ falconsiena_intr_trigger, /* eio_trigger */ falconsiena_intr_fini, /* eio_fini */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_intr_ops_t __efx_intr_hunt_ops = { hunt_intr_init, /* eio_init */ hunt_intr_enable, /* eio_enable */ hunt_intr_disable, /* eio_disable */ hunt_intr_disable_unlocked, /* eio_disable_unlocked */ hunt_intr_trigger, /* eio_trigger */ hunt_intr_fini, /* eio_fini */ }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp) { efx_intr_t *eip = &(enp->en_intr); efx_intr_ops_t *eiop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (enp->en_mod_flags & EFX_MOD_INTR) { rc = EINVAL; goto fail1; } eip->ei_esmp = esmp; eip->ei_type = type; eip->ei_level = 0; enp->en_mod_flags |= EFX_MOD_INTR; switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: eiop = (efx_intr_ops_t *)&__efx_intr_falcon_ops; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: eiop = (efx_intr_ops_t *)&__efx_intr_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: eiop = (efx_intr_ops_t *)&__efx_intr_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: EFSYS_ASSERT(B_FALSE); rc = ENOTSUP; goto fail2; } if ((rc = eiop->eio_init(enp, type, esmp)) != 0) goto fail3; eip->ei_eiop = eiop; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_intr_fini( __in efx_nic_t *enp) { efx_intr_t *eip = &(enp->en_intr); efx_intr_ops_t *eiop = eip->ei_eiop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); eiop->eio_fini(enp); enp->en_mod_flags &= ~EFX_MOD_INTR; } void efx_intr_enable( __in efx_nic_t *enp) { efx_intr_t 
*eip = &(enp->en_intr); efx_intr_ops_t *eiop = eip->ei_eiop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); eiop->eio_enable(enp); } void efx_intr_disable( __in efx_nic_t *enp) { efx_intr_t *eip = &(enp->en_intr); efx_intr_ops_t *eiop = eip->ei_eiop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); eiop->eio_disable(enp); } void efx_intr_disable_unlocked( __in efx_nic_t *enp) { efx_intr_t *eip = &(enp->en_intr); efx_intr_ops_t *eiop = eip->ei_eiop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); eiop->eio_disable_unlocked(enp); } - __checkReturn int + __checkReturn efx_rc_t efx_intr_trigger( __in efx_nic_t *enp, __in unsigned int level) { efx_intr_t *eip = &(enp->en_intr); efx_intr_ops_t *eiop = eip->ei_eiop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); return (eiop->eio_trigger(enp, level)); } void efx_intr_status_line( __in efx_nic_t *enp, __out boolean_t *fatalp, __out uint32_t *qmaskp) { efx_intr_t *eip = &(enp->en_intr); efx_dword_t dword; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); /* Ensure Huntington and Falcon/Siena ISR at same location */ EFX_STATIC_ASSERT(FR_BZ_INT_ISR0_REG_OFST == ER_DZ_BIU_INT_ISR_REG_OFST); /* * Read the queue mask and implicitly acknowledge the * interrupt. */ EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE); *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0); EFSYS_PROBE1(qmask, uint32_t, *qmaskp); #if EFSYS_OPT_HUNTINGTON if (enp->en_family == EFX_FAMILY_HUNTINGTON) { /* Huntington reports fatal errors via events */ *fatalp = B_FALSE; return; } #endif if (*qmaskp & (1U << eip->ei_level)) *fatalp = falconsiena_intr_check_fatal(enp); else *fatalp = B_FALSE; } void efx_intr_status_message( __in efx_nic_t *enp, __in unsigned int message, __out boolean_t *fatalp) { efx_intr_t *eip = &(enp->en_intr); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); #if EFSYS_OPT_HUNTINGTON if (enp->en_family == EFX_FAMILY_HUNTINGTON) { /* Huntington reports fatal errors via events */ *fatalp = B_FALSE; return; } #endif if (message == eip->ei_level) *fatalp = falconsiena_intr_check_fatal(enp); else *fatalp = B_FALSE; } void efx_intr_fatal( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); #if EFSYS_OPT_HUNTINGTON if (enp->en_family == EFX_FAMILY_HUNTINGTON) { /* Huntington reports fatal errors via events */ return; } #endif #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA falconsiena_intr_fatal(enp); #endif } /* ************************************************************************* */ /* ************************************************************************* */ /* ************************************************************************* */ #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp) { efx_intr_t *eip = &(enp->en_intr); efx_oword_t oword; /* * bug17213 workaround. * * Under legacy interrupts, don't share a level between fatal * interrupts and event queue interrupts. Under MSI-X, they * must share, or we won't get an interrupt. 
*/ if (enp->en_family == EFX_FAMILY_SIENA && eip->ei_type == EFX_INTR_LINE) eip->ei_level = 0x1f; else eip->ei_level = 0; /* Enable all the genuinely fatal interrupts */ EFX_SET_OWORD(oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0); if (enp->en_family >= EFX_FAMILY_SIENA) EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0); EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword); /* Set up the interrupt address register */ EFX_POPULATE_OWORD_3(oword, FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0, FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff, FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32); EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword); return (0); } static void falconsiena_intr_enable( __in efx_nic_t *enp) { efx_intr_t *eip = &(enp->en_intr); efx_oword_t oword; EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level); EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1); EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); } static void falconsiena_intr_disable( __in efx_nic_t *enp) { efx_oword_t oword; EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0); EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); EFSYS_SPIN(10); } static void falconsiena_intr_disable_unlocked( __in efx_nic_t *enp) { efx_oword_t oword; EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST, &oword, B_FALSE); EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0); EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST, &oword, B_FALSE); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_intr_trigger( __in efx_nic_t *enp, __in unsigned int level) { efx_intr_t *eip = &(enp->en_intr); efx_oword_t oword; unsigned int count; uint32_t sel; - int rc; + efx_rc_t rc; /* bug16757: No event queues can be initialized */ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); switch (enp->en_family) { case EFX_FAMILY_FALCON: if (level >= EFX_NINTR_FALCON) { rc = EINVAL; goto fail1; } break; case EFX_FAMILY_SIENA: if (level >= EFX_NINTR_SIENA) { rc = EINVAL; goto fail1; } break; default: EFSYS_ASSERT(B_FALSE); break; } if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL)) return (ENOTSUP); /* avoid EFSYS_PROBE() */ sel = level; /* Trigger a test interrupt */ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel); EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1); EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); /* * Wait up to 100ms for the interrupt to be raised before restoring * KER_INT_LEVE_SEL. 
Ignore a failure to raise (the caller will * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL */ count = 0; do { EFSYS_SPIN(100); /* 100us */ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000); EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level); EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn boolean_t falconsiena_intr_check_fatal( __in efx_nic_t *enp) { efx_intr_t *eip = &(enp->en_intr); efsys_mem_t *esmp = eip->ei_esmp; efx_oword_t oword; /* Read the syndrome */ EFSYS_MEM_READO(esmp, 0, &oword); if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) { EFSYS_PROBE(fatal); /* Clear the fatal interrupt condition */ EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0); EFSYS_MEM_WRITEO(esmp, 0, &oword); return (B_TRUE); } return (B_FALSE); } static void falconsiena_intr_fatal( __in efx_nic_t *enp) { #if EFSYS_OPT_DECODE_INTR_FATAL efx_oword_t fatal; efx_oword_t mem_per; EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal); EFX_ZERO_OWORD(mem_per); if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 || EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0) EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per); if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR, EFX_OWORD_FIELD(mem_per, EFX_DWORD_0), EFX_OWORD_FIELD(mem_per, EFX_DWORD_1)); if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR, EFX_OWORD_FIELD(mem_per, EFX_DWORD_0), EFX_OWORD_FIELD(mem_per, EFX_DWORD_1)); #else EFSYS_ASSERT(0); #endif } static void falconsiena_intr_fini( __in efx_nic_t *enp) { efx_oword_t oword; /* Clear the interrupt address register */ EFX_ZERO_OWORD(oword); EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword); } #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/efx_mac.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_mac.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_mac.c (revision 293927) @@ -1,978 +1,978 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_impl.h" #if EFSYS_OPT_MAC_FALCON_GMAC #include "falcon_gmac.h" #endif #if EFSYS_OPT_MAC_FALCON_XMAC #include "falcon_xmac.h" #endif #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_mac_multicast_list_set( - __in efx_nic_t *enp); + __in efx_nic_t *enp); #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_MAC_FALCON_GMAC static efx_mac_ops_t __efx_falcon_gmac_ops = { falcon_gmac_reset, /* emo_reset */ falcon_mac_poll, /* emo_poll */ falcon_mac_up, /* emo_up */ falcon_gmac_reconfigure, /* emo_addr_set */ falcon_gmac_reconfigure, /* emo_reconfigure */ falconsiena_mac_multicast_list_set, /* emo_multicast_list_set */ NULL, /* emo_filter_set_default_rxq */ NULL, /* emo_filter_default_rxq_clear */ #if EFSYS_OPT_LOOPBACK falcon_mac_loopback_set, /* emo_loopback_set */ #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS falcon_mac_stats_upload, /* emo_stats_upload */ NULL, /* emo_stats_periodic */ falcon_gmac_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MAC_STATS */ }; #endif /* EFSYS_OPT_MAC_FALCON_GMAC */ #if EFSYS_OPT_MAC_FALCON_XMAC static efx_mac_ops_t __efx_falcon_xmac_ops = { falcon_xmac_reset, /* emo_reset */ falcon_mac_poll, /* emo_poll */ falcon_mac_up, /* emo_up */ falcon_xmac_reconfigure, /* emo_addr_set */ falcon_xmac_reconfigure, /* emo_reconfigure */ falconsiena_mac_multicast_list_set, /* emo_multicast_list_set */ NULL, /* emo_filter_set_default_rxq */ NULL, /* emo_filter_default_rxq_clear */ #if EFSYS_OPT_LOOPBACK falcon_mac_loopback_set, /* emo_loopback_set */ #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS falcon_mac_stats_upload, /* emo_stats_upload */ NULL, /* emo_stats_periodic */ falcon_xmac_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MAC_STATS */ }; #endif /* EFSYS_OPT_MAC_FALCON_XMAC */ #if EFSYS_OPT_SIENA static efx_mac_ops_t __efx_siena_mac_ops = { NULL, /* emo_reset */ siena_mac_poll, /* emo_poll */ siena_mac_up, /* emo_up */ siena_mac_reconfigure, /* emo_addr_set */ siena_mac_reconfigure, /* emo_reconfigure */ falconsiena_mac_multicast_list_set, /* 
emo_multicast_list_set */ NULL, /* emo_filter_set_default_rxq */ NULL, /* emo_filter_default_rxq_clear */ #if EFSYS_OPT_LOOPBACK siena_mac_loopback_set, /* emo_loopback_set */ #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS efx_mcdi_mac_stats_upload, /* emo_stats_upload */ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */ siena_mac_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MAC_STATS */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_mac_ops_t __efx_hunt_mac_ops = { NULL, /* emo_reset */ hunt_mac_poll, /* emo_poll */ hunt_mac_up, /* emo_up */ hunt_mac_addr_set, /* emo_addr_set */ hunt_mac_reconfigure, /* emo_reconfigure */ hunt_mac_multicast_list_set, /* emo_multicast_list_set */ hunt_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */ hunt_mac_filter_default_rxq_clear, /* emo_filter_default_rxq_clear */ #if EFSYS_OPT_LOOPBACK hunt_mac_loopback_set, /* emo_loopback_set */ #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS efx_mcdi_mac_stats_upload, /* emo_stats_upload */ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */ hunt_mac_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MAC_STATS */ }; #endif /* EFSYS_OPT_HUNTINGTON */ static efx_mac_ops_t *__efx_mac_ops[] = { /* [EFX_MAC_INVALID] */ NULL, /* [EFX_MAC_FALCON_GMAC] */ #if EFSYS_OPT_MAC_FALCON_GMAC &__efx_falcon_gmac_ops, #else NULL, #endif /* [EFX_MAC_FALCON_XMAC] */ #if EFSYS_OPT_MAC_FALCON_XMAC &__efx_falcon_xmac_ops, #else NULL, #endif /* [EFX_MAC_SIENA] */ #if EFSYS_OPT_SIENA &__efx_siena_mac_ops, #else NULL, #endif /* [EFX_MAC_HUNTINGTON] */ #if EFSYS_OPT_HUNTINGTON &__efx_hunt_mac_ops, #else NULL, #endif }; - __checkReturn int + __checkReturn efx_rc_t efx_mac_pdu_set( __in efx_nic_t *enp, __in size_t pdu) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; uint32_t old_pdu; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); if (pdu < EFX_MAC_PDU_MIN) { rc = EINVAL; goto fail1; } if (pdu > EFX_MAC_PDU_MAX) { rc = EINVAL; goto fail2; } old_pdu = epp->ep_mac_pdu; epp->ep_mac_pdu = (uint32_t)pdu; if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); epp->ep_mac_pdu = old_pdu; fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_addr_set( __in efx_nic_t *enp, __in uint8_t *addr) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; uint8_t old_addr[6]; uint32_t oui; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (EFX_MAC_ADDR_IS_MULTICAST(addr)) { rc = EINVAL; goto fail1; } oui = addr[0] << 16 | addr[1] << 8 | addr[2]; if (oui == 0x000000) { rc = EINVAL; goto fail2; } EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr); EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr); if ((rc = emop->emo_addr_set(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_filter_set( __in efx_nic_t *enp, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; boolean_t 
old_all_unicst; boolean_t old_mulcst; boolean_t old_all_mulcst; boolean_t old_brdcst; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); old_all_unicst = epp->ep_all_unicst; old_mulcst = epp->ep_mulcst; old_all_mulcst = epp->ep_all_mulcst; old_brdcst = epp->ep_brdcst; epp->ep_all_unicst = all_unicst; epp->ep_mulcst = mulcst; epp->ep_all_mulcst = all_mulcst; epp->ep_brdcst = brdcst; if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); epp->ep_all_unicst = old_all_unicst; epp->ep_mulcst = old_mulcst; epp->ep_all_mulcst = old_all_mulcst; epp->ep_brdcst = old_brdcst; return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_drain( __in efx_nic_t *enp, __in boolean_t enabled) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); if (epp->ep_mac_drain == enabled) return (0); epp->ep_mac_drain = enabled; if (enabled && emop->emo_reset != NULL) { if ((rc = emop->emo_reset(enp)) != 0) goto fail1; EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_MAC); enp->en_reset_flags &= ~EFX_RESET_PHY; } if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if ((rc = emop->emo_up(enp, mac_upp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_fcntl_set( __in efx_nic_t *enp, __in unsigned int fcntl, __in boolean_t autoneg) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; efx_phy_ops_t *epop = epp->ep_epop; unsigned int old_fcntl; boolean_t old_autoneg; unsigned int old_adv_cap; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) { rc = EINVAL; goto fail1; } /* * Ignore a request to set flow control auto-negotiation * if the PHY doesn't support it. */ if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) autoneg = B_FALSE; old_fcntl = epp->ep_fcntl; old_autoneg = epp->ep_fcntl_autoneg; old_adv_cap = epp->ep_adv_cap_mask; epp->ep_fcntl = fcntl; epp->ep_fcntl_autoneg = autoneg; /* * Always encode the flow control settings in the advertised * capabilities even if we are not trying to auto-negotiate * them and reconfigure both the PHY and the MAC. 
*/ if (fcntl & EFX_FCNTL_RESPOND) epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE | 1 << EFX_PHY_CAP_ASYM); else epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE | 1 << EFX_PHY_CAP_ASYM); if (fcntl & EFX_FCNTL_GENERATE) epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM); if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail2; if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); epp->ep_fcntl = old_fcntl; epp->ep_fcntl_autoneg = old_autoneg; epp->ep_adv_cap_mask = old_adv_cap; fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_mac_fcntl_get( __in efx_nic_t *enp, __out unsigned int *fcntl_wantedp, __out unsigned int *fcntl_linkp) { efx_port_t *epp = &(enp->en_port); unsigned int wanted = 0; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); /* * Decode the requested flow control settings from the PHY * advertised capabilities. */ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE)) wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM)) wanted ^= EFX_FCNTL_GENERATE; *fcntl_linkp = epp->ep_fcntl; *fcntl_wantedp = wanted; } /* * FIXME: efx_mac_hash_set() should be deleted once all its callers have been * updated to use efx_mac_multicast_list_set(). * Then efx_port_t.ep_multicst_hash could be made Falcon/Siena specific as * well. */ - __checkReturn int + __checkReturn efx_rc_t efx_mac_hash_set( __in efx_nic_t *enp, __in_ecount(EFX_MAC_HASH_BITS) unsigned int const *bucket) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; efx_oword_t old_hash[2]; unsigned int index; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash)); /* Set the lower 128 bits of the hash */ EFX_ZERO_OWORD(epp->ep_multicst_hash[0]); for (index = 0; index < 128; index++) { if (bucket[index] != 0) EFX_SET_OWORD_BIT(epp->ep_multicst_hash[0], index); } /* Set the upper 128 bits of the hash */ EFX_ZERO_OWORD(epp->ep_multicst_hash[1]); for (index = 0; index < 128; index++) { if (bucket[index + 128] != 0) EFX_SET_OWORD_BIT(epp->ep_multicst_hash[1], index); } if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash)); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_multicast_list_set( __in efx_nic_t *enp, __in_ecount(6*count) uint8_t const *addrs, __in int count) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; uint8_t *old_mulcst_addr_list = NULL; uint32_t old_mulcst_addr_count; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (count > EFX_MAC_MULTICAST_LIST_MAX) { rc = EINVAL; goto fail1; } old_mulcst_addr_count = epp->ep_mulcst_addr_count; if (old_mulcst_addr_count > 0) { /* Allocate memory to store old list (instead of using stack) */ EFSYS_KMEM_ALLOC(enp->en_esip, old_mulcst_addr_count * EFX_MAC_ADDR_LEN, old_mulcst_addr_list); if (old_mulcst_addr_list == NULL) { rc = ENOMEM; goto fail2; } /* Save the old list in case we need to rollback */ memcpy(old_mulcst_addr_list, epp->ep_mulcst_addr_list, old_mulcst_addr_count * EFX_MAC_ADDR_LEN); } /* Store the new list */ memcpy(epp->ep_mulcst_addr_list, addrs, 
count * EFX_MAC_ADDR_LEN); epp->ep_mulcst_addr_count = count; if ((rc = emop->emo_multicast_list_set(enp)) != 0) goto fail3; if (old_mulcst_addr_count > 0) { EFSYS_KMEM_FREE(enp->en_esip, old_mulcst_addr_count * EFX_MAC_ADDR_LEN, old_mulcst_addr_list); } return (0); fail3: EFSYS_PROBE(fail3); /* Restore original list on failure */ epp->ep_mulcst_addr_count = old_mulcst_addr_count; if (old_mulcst_addr_count > 0) { memcpy(epp->ep_mulcst_addr_list, old_mulcst_addr_list, old_mulcst_addr_count * EFX_MAC_ADDR_LEN); EFSYS_KMEM_FREE(enp->en_esip, old_mulcst_addr_count * EFX_MAC_ADDR_LEN, old_mulcst_addr_list); } fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (emop->emo_filter_default_rxq_set != NULL) { rc = emop->emo_filter_default_rxq_set(enp, erp, using_rss); if (rc != 0) goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_mac_filter_default_rxq_clear( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (emop->emo_filter_default_rxq_clear != NULL) emop->emo_filter_default_rxq_clear(enp); } #if EFSYS_OPT_MAC_STATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxMacStatNamesBlock 054d43a31d2d7a45 */ static const char *__efx_mac_stat_name[] = { "rx_octets", "rx_pkts", "rx_unicst_pkts", "rx_multicst_pkts", "rx_brdcst_pkts", "rx_pause_pkts", "rx_le_64_pkts", "rx_65_to_127_pkts", "rx_128_to_255_pkts", "rx_256_to_511_pkts", "rx_512_to_1023_pkts", "rx_1024_to_15xx_pkts", "rx_ge_15xx_pkts", "rx_errors", "rx_fcs_errors", "rx_drop_events", "rx_false_carrier_errors", "rx_symbol_errors", "rx_align_errors", "rx_internal_errors", "rx_jabber_pkts", "rx_lane0_char_err", "rx_lane1_char_err", "rx_lane2_char_err", "rx_lane3_char_err", "rx_lane0_disp_err", "rx_lane1_disp_err", "rx_lane2_disp_err", "rx_lane3_disp_err", "rx_match_fault", "rx_nodesc_drop_cnt", "tx_octets", "tx_pkts", "tx_unicst_pkts", "tx_multicst_pkts", "tx_brdcst_pkts", "tx_pause_pkts", "tx_le_64_pkts", "tx_65_to_127_pkts", "tx_128_to_255_pkts", "tx_256_to_511_pkts", "tx_512_to_1023_pkts", "tx_1024_to_15xx_pkts", "tx_ge_15xx_pkts", "tx_errors", "tx_sgl_col_pkts", "tx_mult_col_pkts", "tx_ex_col_pkts", "tx_late_col_pkts", "tx_def_pkts", "tx_ex_def_pkts", "pm_trunc_bb_overflow", "pm_discard_bb_overflow", "pm_trunc_vfifo_full", "pm_discard_vfifo_full", "pm_trunc_qbb", "pm_discard_qbb", "pm_discard_mapping", "rxdp_q_disabled_pkts", "rxdp_di_dropped_pkts", "rxdp_streaming_pkts", "rxdp_hlb_fetch", "rxdp_hlb_wait", "vadapter_rx_unicast_packets", "vadapter_rx_unicast_bytes", "vadapter_rx_multicast_packets", "vadapter_rx_multicast_bytes", "vadapter_rx_broadcast_packets", "vadapter_rx_broadcast_bytes", "vadapter_rx_bad_packets", "vadapter_rx_bad_bytes", "vadapter_rx_overflow", "vadapter_tx_unicast_packets", "vadapter_tx_unicast_bytes", "vadapter_tx_multicast_packets", "vadapter_tx_multicast_bytes", "vadapter_tx_broadcast_packets", "vadapter_tx_broadcast_bytes", "vadapter_tx_bad_packets", "vadapter_tx_bad_bytes", 
"vadapter_tx_overflow", }; /* END MKCONFIG GENERATED EfxMacStatNamesBlock */ __checkReturn const char * efx_mac_stat_name( __in efx_nic_t *enp, __in unsigned int id) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS); return (__efx_mac_stat_name[id]); } #endif /* EFSYS_OPT_NAMES */ - __checkReturn int + __checkReturn efx_rc_t efx_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); /* * Don't assert !ep_mac_stats_pending, because the client might * have failed to finalise statistics when previously stopping * the port. */ if ((rc = emop->emo_stats_upload(enp, esmp)) != 0) goto fail1; epp->ep_mac_stats_pending = B_TRUE; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period_ms, __in boolean_t events) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); if (emop->emo_stats_periodic == NULL) { rc = EINVAL; goto fail1; } if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp, __inout_opt uint32_t *generationp) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); rc = emop->emo_stats_update(enp, esmp, essp, generationp); if (rc == 0) epp->ep_mac_stats_pending = B_FALSE; return (rc); } #endif /* EFSYS_OPT_MAC_STATS */ - __checkReturn int + __checkReturn efx_rc_t efx_mac_select( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mac_type_t type = EFX_MAC_INVALID; efx_mac_ops_t *emop; int rc = EINVAL; #if EFSYS_OPT_HUNTINGTON if (enp->en_family == EFX_FAMILY_HUNTINGTON) { type = EFX_MAC_HUNTINGTON; goto chosen; } #endif #if EFSYS_OPT_SIENA if (enp->en_family == EFX_FAMILY_SIENA) { type = EFX_MAC_SIENA; goto chosen; } #endif #if EFSYS_OPT_FALCON switch (epp->ep_link_mode) { #if EFSYS_OPT_MAC_FALCON_GMAC case EFX_LINK_100HDX: case EFX_LINK_100FDX: case EFX_LINK_1000HDX: case EFX_LINK_1000FDX: type = EFX_MAC_FALCON_GMAC; goto chosen; #endif /* EFSYS_OPT_FALCON_GMAC */ #if EFSYS_OPT_MAC_FALCON_XMAC case EFX_LINK_10000FDX: type = EFX_MAC_FALCON_XMAC; goto chosen; #endif /* EFSYS_OPT_FALCON_XMAC */ default: #if EFSYS_OPT_MAC_FALCON_GMAC && EFSYS_OPT_MAC_FALCON_XMAC /* Only initialise a MAC supported by the PHY */ if (epp->ep_phy_cap_mask & ((1 << EFX_PHY_CAP_1000FDX) | (1 << EFX_PHY_CAP_1000HDX) | (1 << EFX_PHY_CAP_100FDX) | (1 << EFX_PHY_CAP_100HDX) | (1 << EFX_PHY_CAP_10FDX) | (1 << EFX_PHY_CAP_10FDX))) type = EFX_MAC_FALCON_GMAC; else type = EFX_MAC_FALCON_XMAC; #elif EFSYS_OPT_MAC_FALCON_GMAC type = EFX_MAC_FALCON_GMAC; #else type = EFX_MAC_FALCON_XMAC; 
#endif goto chosen; } #endif /* EFSYS_OPT_FALCON */ chosen: EFSYS_ASSERT(type != EFX_MAC_INVALID); EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES); emop = epp->ep_emop = (efx_mac_ops_t *)__efx_mac_ops[type]; EFSYS_ASSERT(emop != NULL); epp->ep_mac_type = type; if (emop->emo_reset != NULL) { if ((rc = emop->emo_reset(enp)) != 0) goto fail1; EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_MAC); enp->en_reset_flags &= ~EFX_RESET_MAC; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA /* Compute the multicast hash as used on Falcon and Siena. */ static void falconsiena_mac_multicast_hash_compute( __in_ecount(6*count) uint8_t const *addrs, __in int count, __out efx_oword_t *hash_low, __out efx_oword_t *hash_high) { uint32_t crc, index; int i; EFSYS_ASSERT(hash_low != NULL); EFSYS_ASSERT(hash_high != NULL); EFX_ZERO_OWORD(*hash_low); EFX_ZERO_OWORD(*hash_high); for (i = 0; i < count; i++) { /* Calculate hash bucket (IEEE 802.3 CRC32 of the MAC addr) */ crc = efx_crc32_calculate(0xffffffff, addrs, EFX_MAC_ADDR_LEN); index = crc % EFX_MAC_HASH_BITS; if (index < 128) { EFX_SET_OWORD_BIT(*hash_low, index); } else { EFX_SET_OWORD_BIT(*hash_high, index - 128); } addrs += EFX_MAC_ADDR_LEN; } } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_mac_multicast_list_set( - __in efx_nic_t *enp) + __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; efx_oword_t old_hash[2]; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash)); falconsiena_mac_multicast_hash_compute(epp->ep_mulcst_addr_list, epp->ep_mulcst_addr_count, &epp->ep_multicst_hash[0], &epp->ep_multicst_hash[1]); if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash)); return (rc); } #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/efx_mcdi.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_mcdi.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_mcdi.c (revision 293927) @@ -1,1659 +1,1659 @@ /*- * Copyright (c) 2008-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_regs_mcdi.h" #include "efx_impl.h" #if EFSYS_OPT_MCDI #if EFSYS_OPT_SIENA static efx_mcdi_ops_t __efx_mcdi_siena_ops = { siena_mcdi_init, /* emco_init */ siena_mcdi_request_copyin, /* emco_request_copyin */ siena_mcdi_request_poll, /* emco_request_poll */ siena_mcdi_request_copyout, /* emco_request_copyout */ siena_mcdi_poll_reboot, /* emco_poll_reboot */ siena_mcdi_fini, /* emco_fini */ siena_mcdi_fw_update_supported, /* emco_fw_update_supported */ siena_mcdi_macaddr_change_supported, /* emco_macaddr_change_supported */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_mcdi_ops_t __efx_mcdi_hunt_ops = { hunt_mcdi_init, /* emco_init */ hunt_mcdi_request_copyin, /* emco_request_copyin */ hunt_mcdi_request_poll, /* emco_request_poll */ hunt_mcdi_request_copyout, /* emco_request_copyout */ hunt_mcdi_poll_reboot, /* emco_poll_reboot */ hunt_mcdi_fini, /* emco_fini */ hunt_mcdi_fw_update_supported, /* emco_fw_update_supported */ hunt_mcdi_macaddr_change_supported, /* emco_macaddr_change_supported */ }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *emtp) { efx_mcdi_ops_t *emcop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: emcop = NULL; emtp = NULL; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: emcop = (efx_mcdi_ops_t *)&__efx_mcdi_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: emcop = (efx_mcdi_ops_t *)&__efx_mcdi_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } if (enp->en_features & EFX_FEATURE_MCDI_DMA) { /* MCDI requires a DMA buffer in host memory */ if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) { rc = EINVAL; goto fail2; } } enp->en_mcdi.em_emtp = emtp; if (emcop != NULL && emcop->emco_init != NULL) { if ((rc = emcop->emco_init(enp, emtp)) != 0) goto fail3; } enp->en_mcdi.em_emcop = emcop; enp->en_mod_flags |= EFX_MOD_MCDI; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_mcdi.em_emcop = NULL; enp->en_mcdi.em_emtp = NULL; enp->en_mod_flags &= ~EFX_MOD_MCDI; return (rc); } void efx_mcdi_fini( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI); if (emcop != 
NULL && emcop->emco_fini != NULL) emcop->emco_fini(enp); emip->emi_port = 0; emip->emi_aborted = 0; enp->en_mcdi.em_emcop = NULL; enp->en_mod_flags &= ~EFX_MOD_MCDI; } void efx_mcdi_new_epoch( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); int state; /* Start a new epoch (allow fresh MCDI requests to succeed) */ EFSYS_LOCK(enp->en_eslp, state); emip->emi_new_epoch = B_TRUE; EFSYS_UNLOCK(enp->en_eslp, state); } void efx_mcdi_request_start( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in boolean_t ev_cpl) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; unsigned int seq; boolean_t new_epoch; int state; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); if (emcop == NULL || emcop->emco_request_copyin == NULL) return; /* * efx_mcdi_request_start() is naturally serialised against both * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(), * by virtue of there only being one outstanding MCDI request. * Unfortunately, upper layers may also call efx_mcdi_request_abort() * at any time, to timeout a pending mcdi request, That request may * then subsequently complete, meaning efx_mcdi_ev_cpl() or * efx_mcdi_ev_death() may end up running in parallel with * efx_mcdi_request_start(). This race is handled by ensuring that * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the * en_eslp lock. */ EFSYS_LOCK(enp->en_eslp, state); EFSYS_ASSERT(emip->emi_pending_req == NULL); emip->emi_pending_req = emrp; emip->emi_ev_cpl = ev_cpl; emip->emi_poll_cnt = 0; seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ); new_epoch = emip->emi_new_epoch; EFSYS_UNLOCK(enp->en_eslp, state); emcop->emco_request_copyin(enp, emrp, seq, ev_cpl, new_epoch); } __checkReturn boolean_t efx_mcdi_request_poll( __in efx_nic_t *enp) { efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; boolean_t completed; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); completed = B_FALSE; if (emcop != NULL && emcop->emco_request_poll != NULL) completed = emcop->emco_request_poll(enp); return (completed); } __checkReturn boolean_t efx_mcdi_request_abort( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_mcdi_req_t *emrp; boolean_t aborted; int state; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); /* * efx_mcdi_ev_* may have already completed this event, and be * spinning/blocked on the upper layer lock. So it *is* legitimate * to for emi_pending_req to be NULL. If there is a pending event * completed request, then provide a "credit" to allow * efx_mcdi_ev_cpl() to accept a single spurious completion. */ EFSYS_LOCK(enp->en_eslp, state); emrp = emip->emi_pending_req; aborted = (emrp != NULL); if (aborted) { emip->emi_pending_req = NULL; /* Error the request */ emrp->emr_out_length_used = 0; emrp->emr_rc = ETIMEDOUT; /* Provide a credit for seqno/emr_pending_req mismatches */ if (emip->emi_ev_cpl) ++emip->emi_aborted; /* * The upper layer has called us, so we don't * need to complete the request. 
*/ } EFSYS_UNLOCK(enp->en_eslp, state); return (aborted); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_request_errcode( __in unsigned int err) { switch (err) { /* MCDI v1 */ case MC_CMD_ERR_EPERM: return (EACCES); case MC_CMD_ERR_ENOENT: return (ENOENT); case MC_CMD_ERR_EINTR: return (EINTR); case MC_CMD_ERR_EACCES: return (EACCES); case MC_CMD_ERR_EBUSY: return (EBUSY); case MC_CMD_ERR_EINVAL: return (EINVAL); case MC_CMD_ERR_EDEADLK: return (EDEADLK); case MC_CMD_ERR_ENOSYS: return (ENOTSUP); case MC_CMD_ERR_ETIME: return (ETIMEDOUT); case MC_CMD_ERR_ENOTSUP: return (ENOTSUP); case MC_CMD_ERR_EALREADY: return (EALREADY); /* MCDI v2 */ #ifdef MC_CMD_ERR_EAGAIN case MC_CMD_ERR_EAGAIN: return (EAGAIN); #endif #ifdef MC_CMD_ERR_ENOSPC case MC_CMD_ERR_ENOSPC: return (ENOSPC); #endif case MC_CMD_ERR_ALLOC_FAIL: return (ENOMEM); case MC_CMD_ERR_NO_VADAPTOR: return (ENOENT); case MC_CMD_ERR_NO_EVB_PORT: return (ENOENT); case MC_CMD_ERR_NO_VSWITCH: return (ENODEV); case MC_CMD_ERR_VLAN_LIMIT: return (EINVAL); case MC_CMD_ERR_BAD_PCI_FUNC: return (ENODEV); case MC_CMD_ERR_BAD_VLAN_MODE: return (EINVAL); case MC_CMD_ERR_BAD_VSWITCH_TYPE: return (EINVAL); case MC_CMD_ERR_BAD_VPORT_TYPE: return (EINVAL); case MC_CMD_ERR_MAC_EXIST: return (EEXIST); default: EFSYS_PROBE1(mc_pcol_error, int, err); return (EIO); } } void efx_mcdi_raise_exception( __in efx_nic_t *enp, __in_opt efx_mcdi_req_t *emrp, __in int rc) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efx_mcdi_exception_t exception; /* Reboot or Assertion failure only */ EFSYS_ASSERT(rc == EIO || rc == EINTR); /* * If MC_CMD_REBOOT causes a reboot (dependent on parameters), * then the EIO is not worthy of an exception. */ if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO) return; exception = (rc == EIO) ? EFX_MCDI_EXCEPTION_MC_REBOOT : EFX_MCDI_EXCEPTION_MC_BADASSERT; emtp->emt_exception(emtp->emt_context, exception); } -static int +static efx_rc_t efx_mcdi_poll_reboot( __in efx_nic_t *enp) { efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; return (emcop->emco_poll_reboot(enp)); } void efx_mcdi_execute( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); emrp->emr_quiet = B_FALSE; emtp->emt_execute(emtp->emt_context, emrp); } void efx_mcdi_execute_quiet( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); emrp->emr_quiet = B_TRUE; emtp->emt_execute(emtp->emt_context, emrp); } void efx_mcdi_ev_cpl( __in efx_nic_t *enp, __in unsigned int seq, __in unsigned int outlen, __in int errcode) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; efx_mcdi_req_t *emrp; int state; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); /* * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start() * when we're completing an aborted request. 
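The request start/poll/abort entry points above are driven entirely by the client, so a caller with no event queue must poll for completion itself. A minimal polling sketch, assuming a hypothetical mcdi_execute_polled() helper and an illustrative spin budget of 1000 iterations; only the efx_mcdi_* calls shown in this change are used.

/*
 * Sketch only: issue an MCDI request from a non-event (polled) context
 * and time it out via efx_mcdi_request_abort().  The spin budget is an
 * illustrative value, not one taken from the driver.
 */
static efx_rc_t
mcdi_execute_polled(efx_nic_t *enp, efx_mcdi_req_t *emrp)
{
	unsigned int spins = 0;

	/* ev_cpl == B_FALSE: completion is detected by polling */
	efx_mcdi_request_start(enp, emrp, B_FALSE);

	while (!efx_mcdi_request_poll(enp)) {
		if (++spins > 1000) {
			/* Marks the request ETIMEDOUT if still pending */
			(void) efx_mcdi_request_abort(enp);
			break;
		}
		EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
	}

	/* emr_rc holds an errno value filled in on completion or abort */
	return (emrp->emr_rc);
}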
*/ EFSYS_LOCK(enp->en_eslp, state); if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl || (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { EFSYS_ASSERT(emip->emi_aborted > 0); if (emip->emi_aborted > 0) --emip->emi_aborted; EFSYS_UNLOCK(enp->en_eslp, state); return; } emrp = emip->emi_pending_req; emip->emi_pending_req = NULL; EFSYS_UNLOCK(enp->en_eslp, state); /* * Fill out the remaining hdr fields, and copyout the payload * if the user supplied an output buffer. */ if (errcode != 0) { if (!emrp->emr_quiet) { EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, int, errcode); } emrp->emr_out_length_used = 0; emrp->emr_rc = efx_mcdi_request_errcode(errcode); } else { emrp->emr_out_length_used = outlen; emrp->emr_rc = 0; emcop->emco_request_copyout(enp, emrp); } emtp->emt_ev_cpl(emtp->emt_context); } void efx_mcdi_ev_death( __in efx_nic_t *enp, __in int rc) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efx_mcdi_req_t *emrp = NULL; boolean_t ev_cpl; int state; /* * The MCDI request (if there is one) has been terminated, either * by a BADASSERT or REBOOT event. * * If there is an outstanding event-completed MCDI operation, then we * will never receive the completion event (because both MCDI * completions and BADASSERT events are sent to the same evq). So * complete this MCDI op. * * This function might run in parallel with efx_mcdi_request_poll() * for poll completed mcdi requests, and also with * efx_mcdi_request_start() for post-watchdog completions. */ EFSYS_LOCK(enp->en_eslp, state); emrp = emip->emi_pending_req; ev_cpl = emip->emi_ev_cpl; if (emrp != NULL && emip->emi_ev_cpl) { emip->emi_pending_req = NULL; emrp->emr_out_length_used = 0; emrp->emr_rc = rc; ++emip->emi_aborted; } /* * Since we're running in parallel with a request, consume the * status word before dropping the lock. 
*/ if (rc == EIO || rc == EINTR) { EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); (void) efx_mcdi_poll_reboot(enp); emip->emi_new_epoch = B_TRUE; } EFSYS_UNLOCK(enp->en_eslp, state); efx_mcdi_raise_exception(enp, emrp, rc); if (emrp != NULL && ev_cpl) emtp->emt_ev_cpl(emtp->emt_context); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_version( __in efx_nic_t *enp, __out_ecount_opt(4) uint16_t versionp[4], __out_opt uint32_t *buildp, __out_opt efx_mcdi_boot_t *statusp) { efx_mcdi_req_t req; uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN, MC_CMD_GET_VERSION_OUT_LEN), MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN, MC_CMD_GET_BOOT_STATUS_OUT_LEN))]; efx_word_t *ver_words; uint16_t version[4]; uint32_t build; efx_mcdi_boot_t status; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_VERSION; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } /* bootrom support */ if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) { version[0] = version[1] = version[2] = version[3] = 0; build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); goto version; } if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) { rc = EMSGSIZE; goto fail2; } ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION); version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0); version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0); version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0); version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0); build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); version: /* The bootrom doesn't understand BOOT_STATUS */ if (MC_FW_VERSION_IS_BOOTLOADER(build)) { status = EFX_MCDI_BOOT_ROM; goto out; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_BOOT_STATUS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN; efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc == EACCES) { /* Unprivileged functions cannot access BOOT_STATUS */ status = EFX_MCDI_BOOT_PRIMARY; version[0] = version[1] = version[2] = version[3] = 0; build = 0; goto out; } if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) { rc = EMSGSIZE; goto fail4; } if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS, GET_BOOT_STATUS_OUT_FLAGS_PRIMARY)) status = EFX_MCDI_BOOT_PRIMARY; else status = EFX_MCDI_BOOT_SECONDARY; out: if (versionp != NULL) memcpy(versionp, version, sizeof (version)); if (buildp != NULL) *buildp = build; if (statusp != NULL) *statusp = status; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_do_reboot( __in efx_nic_t *enp, __in boolean_t after_assertion) { uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)]; efx_mcdi_req_t req; - int rc; + efx_rc_t rc; /* * We could require the caller to have caused en_mod_flags=0 to * call this function. This doesn't help the other port though, * who's about to get the MC ripped out from underneath them. * Since they have to cope with the subsequent fallout of MCDI * failures, we should as well. 
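efx_mcdi_version() above reports both the four firmware version words and which boot bank the MC is running from. A short usage sketch; the report_mc_version() name and the printf() reporting are illustrative only, and any unwanted output argument may be passed as NULL.

/*
 * Sketch only: query the running MC firmware version and boot status.
 */
static efx_rc_t
report_mc_version(efx_nic_t *enp)
{
	uint16_t version[4];
	uint32_t build;
	efx_mcdi_boot_t status;
	efx_rc_t rc;

	if ((rc = efx_mcdi_version(enp, version, &build, &status)) != 0)
		return (rc);

	printf("MC firmware %u.%u.%u.%u (build %u), booted from %s\n",
	    version[0], version[1], version[2], version[3], build,
	    (status == EFX_MCDI_BOOT_ROM) ? "bootrom" :
	    (status == EFX_MCDI_BOOT_PRIMARY) ? "primary" : "secondary");

	return (0);
}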
*/ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_REBOOT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_REBOOT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_REBOOT_OUT_LEN; MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS, (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0)); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc == EACCES) { /* Unprivileged functions cannot reboot the MC. */ goto out; } /* A successful reboot request returns EIO. */ if (req.emr_rc != 0 && req.emr_rc != EIO) { rc = req.emr_rc; goto fail1; } out: return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_reboot( __in efx_nic_t *enp) { return (efx_mcdi_do_reboot(enp, B_FALSE)); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_exit_assertion_handler( __in efx_nic_t *enp) { return (efx_mcdi_do_reboot(enp, B_TRUE)); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_read_assertion( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN, MC_CMD_GET_ASSERTS_OUT_LEN)]; const char *reason; unsigned int flags; unsigned int index; unsigned int ofst; int retry; - int rc; + efx_rc_t rc; /* * Before we attempt to chat to the MC, we should verify that the MC * isn't in it's assertion handler, either due to a previous reboot, * or because we're reinitializing due to an eec_exception(). * * Use GET_ASSERTS to read any assertion state that may be present. * Retry this command twice. Once because a boot-time assertion failure * might cause the 1st MCDI request to fail. And once again because * we might race with efx_mcdi_exit_assertion_handler() running on * partner port(s) on the same NIC. */ retry = 2; do { (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_ASSERTS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN; MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1); efx_mcdi_execute_quiet(enp, &req); } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0); if (req.emr_rc != 0) { if (req.emr_rc == EACCES) { /* Unprivileged functions cannot clear assertions. */ goto out; } rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) { rc = EMSGSIZE; goto fail2; } /* Print out any assertion state recorded */ flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS); if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) return (0); reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) ? "system-level assertion" : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) ? "thread-level assertion" : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) ? "watchdog reset" : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP) ? "illegal address trap" : "unknown assertion"; EFSYS_PROBE3(mcpu_assertion, const char *, reason, unsigned int, MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS), unsigned int, MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS)); /* Print out the registers (r1 ... 
r31) */ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; for (index = 1; index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; index++) { EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int, EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst), EFX_DWORD_0)); ofst += sizeof (efx_dword_t); } EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN); out: return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Internal routines for for specific MCDI requests. */ - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_drv_attach( __in efx_nic_t *enp, __in boolean_t attach) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN, MC_CMD_DRV_ATTACH_EXT_OUT_LEN)]; uint32_t flags; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_DRV_ATTACH; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN; /* * Use DONT_CARE for the datapath firmware type to ensure that the * driver can attach to an unprivileged function. The datapath firmware * type to use is controlled by the 'sfboot' utility. */ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0); MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1); MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (attach == B_FALSE) { flags = 0; } else if (enp->en_family == EFX_FAMILY_SIENA) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); /* Create synthetic privileges for Siena functions */ flags = EFX_NIC_FUNC_LINKCTRL | EFX_NIC_FUNC_TRUSTED; if (emip->emi_port == 1) flags |= EFX_NIC_FUNC_PRIMARY; } else { EFX_STATIC_ASSERT(EFX_NIC_FUNC_PRIMARY == (1u << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)); EFX_STATIC_ASSERT(EFX_NIC_FUNC_LINKCTRL == (1u << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)); EFX_STATIC_ASSERT(EFX_NIC_FUNC_TRUSTED == (1u << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)); /* Save function privilege flags (EF10 and later) */ if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_EXT_OUT_LEN) { rc = EMSGSIZE; goto fail3; } flags = MCDI_OUT_DWORD(req, DRV_ATTACH_EXT_OUT_FUNC_FLAGS); } encp->enc_func_flags = flags; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_get_board_cfg( __in efx_nic_t *enp, __out_opt uint32_t *board_typep, __out_opt efx_dword_t *capabilitiesp, __out_ecount_opt(6) uint8_t mac_addrp[6]) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN, MC_CMD_GET_BOARD_CFG_OUT_LENMIN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_BOARD_CFG; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } if (mac_addrp != NULL) { uint8_t *addrp; if (emip->emi_port == 1) { addrp = MCDI_OUT2(req, uint8_t, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0); } else if 
(emip->emi_port == 2) { addrp = MCDI_OUT2(req, uint8_t, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1); } else { rc = EINVAL; goto fail3; } EFX_MAC_ADDR_COPY(mac_addrp, addrp); } if (capabilitiesp != NULL) { if (emip->emi_port == 1) { *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); } else if (emip->emi_port == 2) { *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); } else { rc = EINVAL; goto fail4; } } if (board_typep != NULL) { *board_typep = MCDI_OUT_DWORD(req, GET_BOARD_CFG_OUT_BOARD_TYPE); } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_get_resource_limits( __in efx_nic_t *enp, __out_opt uint32_t *nevqp, __out_opt uint32_t *nrxqp, __out_opt uint32_t *ntxqp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN, MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (nevqp != NULL) *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ); if (nrxqp != NULL) *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ); if (ntxqp != NULL) *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_get_phy_cfg( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN, MC_CMD_GET_PHY_CFG_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PHY_CFG; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) { rc = EMSGSIZE; goto fail2; } encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE); #if EFSYS_OPT_NAMES (void) strncpy(encp->enc_phy_name, MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME), MIN(sizeof (encp->enc_phy_name) - 1, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); #endif /* EFSYS_OPT_NAMES */ (void) memset(encp->enc_phy_revision, 0, sizeof (encp->enc_phy_revision)); memcpy(encp->enc_phy_revision, MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION), MIN(sizeof (encp->enc_phy_revision) - 1, MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN)); #if EFSYS_OPT_PHY_LED_CONTROL encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) | (1 << EFX_PHY_LED_OFF) | (1 << EFX_PHY_LED_ON)); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ #if EFSYS_OPT_PHY_PROPS encp->enc_phy_nprops = 0; #endif /* EFSYS_OPT_PHY_PROPS */ /* Get the media type of the fixed port, if recognised. 
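efx_mcdi_get_board_cfg() and efx_mcdi_get_resource_limits() above take only optional output pointers, so a caller asks for exactly what it needs. A hedged probe-time sketch; the probe_board_limits() name and local variables are illustrative, and how a client clamps its queue counts to the reported limits is left out.

/*
 * Sketch only: read the factory MAC address for this port and the
 * EVQ/RXQ/TXQ limits.  Any output pointer may be NULL if unused.
 */
static efx_rc_t
probe_board_limits(efx_nic_t *enp)
{
	uint8_t mac_addr[6];
	uint32_t board_type;
	uint32_t nevq, nrxq, ntxq;
	efx_rc_t rc;

	if ((rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL,
	    mac_addr)) != 0)
		return (rc);

	if ((rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq,
	    &ntxq)) != 0)
		return (rc);

	/* A real client would size its queues against these limits here */
	return (0);
}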
*/ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI); EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4); EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4); EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP); EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS); EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T); EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS); epp->ep_fixed_port_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES) epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID; epp->ep_phy_cap_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP); #if EFSYS_OPT_PHY_FLAGS encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS); #endif /* EFSYS_OPT_PHY_FLAGS */ encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT); /* Populate internal state */ encp->enc_mcdi_mdio_channel = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL); #if EFSYS_OPT_PHY_STATS encp->enc_mcdi_phy_stat_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST encp->enc_bist_mask = 0; if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, GET_PHY_CFG_OUT_BIST_CABLE_SHORT)) encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT); if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, GET_PHY_CFG_OUT_BIST_CABLE_LONG)) encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG); if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, GET_PHY_CFG_OUT_BIST)) encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL); #endif /* EFSYS_OPT_BIST */ return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_firmware_update_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; - int rc; + efx_rc_t rc; if (emcop != NULL && emcop->emco_fw_update_supported != NULL) { if ((rc = emcop->emco_fw_update_supported(enp, supportedp)) != 0) goto fail1; } else { /* Earlier devices always supported updates */ *supportedp = B_TRUE; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_macaddr_change_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; - int rc; + efx_rc_t rc; if (emcop != NULL && emcop->emco_macaddr_change_supported != NULL) { if ((rc = emcop->emco_macaddr_change_supported(enp, supportedp)) != 0) goto fail1; } else { /* Earlier devices always supported MAC changes */ *supportedp = B_TRUE; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_BIST #if EFSYS_OPT_HUNTINGTON /* * Enter bist offline mode. This is a fw mode which puts the NIC into a state * where memory BIST tests can be run and not much else can interfere or happen. * A reboot is required to exit this mode. 
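Offline BIST mode is entered with the enable-offline helper defined just below, individual tests are kicked off with efx_mcdi_bist_start(), and an MC reboot is the only way back out. A minimal sequencing sketch, assuming a hypothetical run_mc_mem_bist() wrapper; polling for the BIST result is family-specific and omitted here.

/*
 * Sketch only: run the MC memory BIST and reboot out of offline mode.
 */
static efx_rc_t
run_mc_mem_bist(efx_nic_t *enp)
{
	efx_rc_t rc;

	if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0)
		return (rc);

	if ((rc = efx_mcdi_bist_start(enp, EFX_BIST_TYPE_MC_MEM)) != 0)
		return (rc);

	/* ... poll for the BIST result via the family BIST ops ... */

	/* A reboot is required to leave offline BIST mode */
	return (efx_mcdi_reboot(enp));
}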
*/ - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_bist_enable_offline( __in efx_nic_t *enp) { efx_mcdi_req_t req; - int rc; + efx_rc_t rc; EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0); EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0); req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST; req.emr_in_buf = NULL; req.emr_in_length = 0; req.emr_out_buf = NULL; req.emr_out_length = 0; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN, MC_CMD_START_BIST_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_START_BIST; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_START_BIST_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_START_BIST_OUT_LEN; switch (type) { case EFX_BIST_TYPE_PHY_NORMAL: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST); break; case EFX_BIST_TYPE_PHY_CABLE_SHORT: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST_CABLE_SHORT); break; case EFX_BIST_TYPE_PHY_CABLE_LONG: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST_CABLE_LONG); break; case EFX_BIST_TYPE_MC_MEM: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_MC_MEM_BIST); break; case EFX_BIST_TYPE_SAT_MEM: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PORT_MEM_BIST); break; case EFX_BIST_TYPE_REG: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_REG_BIST); break; default: EFSYS_ASSERT(0); } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_BIST */ /* Enable logging of some events (e.g. 
link state changes) */ - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_log_ctrl( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN, MC_CMD_LOG_CTRL_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LOG_CTRL; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN; MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST, MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ); MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_MAC_STATS typedef enum efx_stats_action_e { EFX_STATS_CLEAR, EFX_STATS_UPLOAD, EFX_STATS_ENABLE_NOEVENTS, EFX_STATS_ENABLE_EVENTS, EFX_STATS_DISABLE, } efx_stats_action_t; -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_mac_stats( __in efx_nic_t *enp, __in_opt efsys_mem_t *esmp, __in efx_stats_action_t action) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN, MC_CMD_MAC_STATS_OUT_DMA_LEN)]; int clear = (action == EFX_STATS_CLEAR); int upload = (action == EFX_STATS_UPLOAD); int enable = (action == EFX_STATS_ENABLE_NOEVENTS); int events = (action == EFX_STATS_ENABLE_EVENTS); int disable = (action == EFX_STATS_DISABLE); - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_MAC_STATS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN; MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD, MAC_STATS_IN_DMA, upload, MAC_STATS_IN_CLEAR, clear, MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable, MAC_STATS_IN_PERIODIC_ENABLE, enable | events, MAC_STATS_IN_PERIODIC_NOEVENT, !events, MAC_STATS_IN_PERIOD_MS, (enable | events) ? 1000: 0); if (esmp != NULL) { int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t); EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <= EFX_MAC_STATS_SIZE); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO, EFSYS_MEM_ADDR(esmp) & 0xffffffff); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI, EFSYS_MEM_ADDR(esmp) >> 32); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes); } else { EFSYS_ASSERT(!upload && !enable && !events); } /* * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats, * as this may fail (and leave periodic DMA enabled) if the * vadapter has already been deleted. */ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID, (disable ? 
EVB_PORT_ID_NULL : enp->en_vport_id)); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { /* EF10: Expect ENOENT if no DMA queues are initialised */ if ((req.emr_rc != ENOENT) || (enp->en_rx_qcount + enp->en_tx_qcount != 0)) { rc = req.emr_rc; goto fail1; } } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_mac_stats_clear( __in efx_nic_t *enp) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp) { - int rc; + efx_rc_t rc; /* * The MC DMAs aggregate statistics for our convenience, so we can * avoid having to pull the statistics buffer into the cache to * maintain cumulative statistics. */ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period, __in boolean_t events) { - int rc; + efx_rc_t rc; /* * The MC DMAs aggregate statistics for our convenience, so we can * avoid having to pull the statistics buffer into the cache to * maintain cumulative statistics. * Huntington uses a fixed 1sec period, so use that on Siena too. */ if (period == 0) rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE); else if (events) rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS); else rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS); if (rc != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_MAC_STATS */ #if EFSYS_OPT_HUNTINGTON /* * This function returns the pf and vf number of a function. If it is a pf the * vf number is 0xffff. The vf number is the index of the vf on that * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0), * (pf=0,vf=1), (pf=0,vf=2) aand the pf will return (pf=0, vf=0xffff). 
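The efx_mcdi_mac_stats() wrappers above assume the caller owns a DMA buffer of at least EFX_MAC_STATS_SIZE bytes that the MC can write into. A hedged sketch of the usual enable/snapshot/disable sequence; the mac_stats_dma_cycle() name is illustrative, and allocation and mapping of the efsys_mem_t are platform code assumed to have happened elsewhere.

/*
 * Sketch only: start periodic MAC stats DMA without events, take one
 * immediate snapshot, then disable periodic DMA again.
 */
static efx_rc_t
mac_stats_dma_cycle(efx_nic_t *enp, efsys_mem_t *esmp)
{
	efx_rc_t rc;

	/* A non-zero period enables the MC's fixed 1-second periodic DMA */
	if ((rc = efx_mcdi_mac_stats_periodic(enp, esmp, 1000, B_FALSE)) != 0)
		return (rc);

	/* Ask the MC for an immediate snapshot into the same buffer */
	rc = efx_mcdi_mac_stats_upload(enp, esmp);

	/* A period of zero disables periodic DMA */
	(void) efx_mcdi_mac_stats_periodic(enp, NULL, 0, B_FALSE);

	return (rc);
}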
*/ - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_get_function_info( __in efx_nic_t *enp, __out uint32_t *pfp, __out_opt uint32_t *vfp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN, MC_CMD_GET_FUNCTION_INFO_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_FUNCTION_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF); if (vfp != NULL) *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_privilege_mask( __in efx_nic_t *enp, __in uint32_t pf, __in uint32_t vf, __out uint32_t *maskp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN, MC_CMD_PRIVILEGE_MASK_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_PRIVILEGE_MASK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN; MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION, PRIVILEGE_MASK_IN_FUNCTION_PF, pf, PRIVILEGE_MASK_IN_FUNCTION_VF, vf); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_set_workaround( __in efx_nic_t *enp, __in uint32_t type, __in boolean_t enabled, __out_opt uint32_t *flagsp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN, MC_CMD_WORKAROUND_EXT_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_WORKAROUND; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN; MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type); MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 
1 : 0); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (flagsp != NULL) { if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN) *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS); else *flagsp = 0; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_get_workarounds( __in efx_nic_t *enp, __out_opt uint32_t *implementedp, __out_opt uint32_t *enabledp) { efx_mcdi_req_t req; uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_WORKAROUNDS; req.emr_in_buf = NULL; req.emr_in_length = 0; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (implementedp != NULL) { *implementedp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED); } if (enabledp != NULL) { *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED); } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_MCDI */ Index: stable/10/sys/dev/sfxge/common/efx_mcdi.h =================================================================== --- stable/10/sys/dev/sfxge/common/efx_mcdi.h (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_mcdi.h (revision 293927) @@ -1,357 +1,357 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_EFX_MCDI_H #define _SYS_EFX_MCDI_H #include "efx.h" #include "efx_regs.h" #include "efx_regs_mcdi.h" #ifdef __cplusplus extern "C" { #endif /* * A reboot/assertion causes the MCDI status word to be set after the * command word is set or a REBOOT event is sent. If we notice a reboot * via these mechanisms then wait 10ms for the status word to be set. 
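The workaround helpers at the end of efx_mcdi.c above treat the GET_WORKAROUNDS output as plain bitmasks, while MC_CMD_WORKAROUND takes a separate type value. A sketch of enabling a workaround only when it is implemented but not yet on; wanted_mask and wanted_type are hypothetical placeholders for a bitmask bit and its matching WORKAROUND_IN_TYPE value, since this change names no specific workaround.

/*
 * Sketch only: enable an MC workaround if the firmware implements it
 * but does not already have it switched on.
 */
static efx_rc_t
enable_workaround_if_needed(efx_nic_t *enp, uint32_t wanted_mask,
    uint32_t wanted_type)
{
	uint32_t implemented, enabled, flags;
	efx_rc_t rc;

	if ((rc = efx_mcdi_get_workarounds(enp, &implemented,
	    &enabled)) != 0)
		return (rc);

	/* Nothing to do if unsupported, or if already enabled */
	if ((implemented & wanted_mask) == 0 ||
	    (enabled & wanted_mask) != 0)
		return (0);

	/* flags reports extended output when the firmware provides it */
	return (efx_mcdi_set_workaround(enp, wanted_type, B_TRUE, &flags));
}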
*/ #define EFX_MCDI_STATUS_SLEEP_US 10000 struct efx_mcdi_req_s { boolean_t emr_quiet; /* Inputs: Command #, input buffer and length */ unsigned int emr_cmd; uint8_t *emr_in_buf; size_t emr_in_length; /* Outputs: retcode, buffer, length, and length used*/ int emr_rc; uint8_t *emr_out_buf; size_t emr_out_length; size_t emr_out_length_used; }; typedef struct efx_mcdi_iface_s { unsigned int emi_port; unsigned int emi_seq; efx_mcdi_req_t *emi_pending_req; boolean_t emi_ev_cpl; boolean_t emi_new_epoch; int emi_aborted; uint32_t emi_poll_cnt; uint32_t emi_mc_reboot_status; } efx_mcdi_iface_t; extern void efx_mcdi_execute( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp); extern void efx_mcdi_execute_quiet( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp); extern void efx_mcdi_ev_cpl( __in efx_nic_t *enp, __in unsigned int seq, __in unsigned int outlen, __in int errcode); extern void efx_mcdi_ev_death( __in efx_nic_t *enp, __in int rc); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_request_errcode( __in unsigned int err); extern void efx_mcdi_raise_exception( __in efx_nic_t *enp, __in_opt efx_mcdi_req_t *emrp, __in int rc); typedef enum efx_mcdi_boot_e { EFX_MCDI_BOOT_PRIMARY, EFX_MCDI_BOOT_SECONDARY, EFX_MCDI_BOOT_ROM, } efx_mcdi_boot_t; -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_version( __in efx_nic_t *enp, __out_ecount_opt(4) uint16_t versionp[4], __out_opt uint32_t *buildp, __out_opt efx_mcdi_boot_t *statusp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_read_assertion( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_exit_assertion_handler( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_drv_attach( __in efx_nic_t *enp, __in boolean_t attach); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_get_board_cfg( __in efx_nic_t *enp, __out_opt uint32_t *board_typep, __out_opt efx_dword_t *capabilitiesp, __out_ecount_opt(6) uint8_t mac_addrp[6]); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_get_phy_cfg( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_firmware_update_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_macaddr_change_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); #if EFSYS_OPT_BIST #if EFSYS_OPT_HUNTINGTON -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_bist_enable_offline( __in efx_nic_t *enp); #endif /* EFSYS_OPT_HUNTINGTON */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_get_resource_limits( __in efx_nic_t *enp, __out_opt uint32_t *nevqp, __out_opt uint32_t *nrxqp, __out_opt uint32_t *ntxqp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_log_ctrl( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_mac_stats_clear( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period, __in boolean_t events); #if EFSYS_OPT_LOOPBACK -extern __checkReturn int +extern __checkReturn efx_rc_t 
efx_mcdi_get_loopback_modes( __in efx_nic_t *enp); #endif /* EFSYS_OPT_LOOPBACK */ #define MCDI_IN(_emr, _type, _ofst) \ ((_type *)((_emr).emr_in_buf + (_ofst))) #define MCDI_IN2(_emr, _type, _ofst) \ MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST) #define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \ EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \ EFX_BYTE_0, _value) #define MCDI_IN_SET_WORD(_emr, _ofst, _value) \ EFX_POPULATE_WORD_1(*MCDI_IN2(_emr, efx_word_t, _ofst), \ EFX_WORD_0, _value) #define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ EFX_DWORD_0, _value) #define MCDI_IN_SET_DWORD_FIELD(_emr, _ofst, _field, _value) \ EFX_SET_DWORD_FIELD(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field, _value) #define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1) #define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \ _field2, _value2) \ EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2) #define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3) \ EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3) #define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4) \ EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4) #define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5) \ EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5) #define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6) \ EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6) #define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, _field7, _value7) \ EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6, \ MC_CMD_ ## _field7, _value7) #define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, _field7, _value7, \ _field8, _value8) \ EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6, \ MC_CMD_ ## _field7, _value7, \ MC_CMD_ ## _field8, _value8) #define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, 
_value5, _field6, _value6, _field7, _value7, \ _field8, _value8, _field9, _value9) \ EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6, \ MC_CMD_ ## _field7, _value7, \ MC_CMD_ ## _field8, _value8, \ MC_CMD_ ## _field9, _value9) #define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, _field7, _value7, \ _field8, _value8, _field9, _value9, _field10, _value10) \ EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6, \ MC_CMD_ ## _field7, _value7, \ MC_CMD_ ## _field8, _value8, \ MC_CMD_ ## _field9, _value9, \ MC_CMD_ ## _field10, _value10) #define MCDI_OUT(_emr, _type, _ofst) \ ((_type *)((_emr).emr_out_buf + (_ofst))) #define MCDI_OUT2(_emr, _type, _ofst) \ MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST) #define MCDI_OUT_BYTE(_emr, _ofst) \ EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \ EFX_BYTE_0) #define MCDI_OUT_WORD(_emr, _ofst) \ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \ EFX_WORD_0) #define MCDI_OUT_DWORD(_emr, _ofst) \ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \ EFX_DWORD_0) #define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field) #define MCDI_EV_FIELD(_eqp, _field) \ EFX_QWORD_FIELD(*_eqp, MCDI_EVENT_ ## _field) #define MCDI_CMD_DWORD_FIELD(_edp, _field) \ EFX_DWORD_FIELD(*_edp, MC_CMD_ ## _field) #ifdef __cplusplus } #endif #endif /* _SYS_EFX_MCDI_H */ Index: stable/10/sys/dev/sfxge/common/efx_mon.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_mon.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_mon.c (revision 293927) @@ -1,346 +1,346 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_MON_NULL #include "nullmon.h" #endif #if EFSYS_OPT_MON_LM87 #include "lm87.h" #endif #if EFSYS_OPT_MON_MAX6647 #include "max6647.h" #endif #if EFSYS_OPT_MON_MCDI #include "mcdi_mon.h" #endif #if EFSYS_OPT_NAMES static const char *__efx_mon_name[] = { "", "nullmon", "lm87", "max6647", "sfx90x0", "sfx91x0" }; const char * efx_mon_name( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID); EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES); return (__efx_mon_name[encp->enc_mon_type]); } #endif /* EFSYS_OPT_NAMES */ #if EFSYS_OPT_MON_NULL static efx_mon_ops_t __efx_mon_null_ops = { nullmon_reset, /* emo_reset */ nullmon_reconfigure, /* emo_reconfigure */ #if EFSYS_OPT_MON_STATS nullmon_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MON_STATS */ }; #endif #if EFSYS_OPT_MON_LM87 static efx_mon_ops_t __efx_mon_lm87_ops = { lm87_reset, /* emo_reset */ lm87_reconfigure, /* emo_reconfigure */ #if EFSYS_OPT_MON_STATS lm87_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MON_STATS */ }; #endif #if EFSYS_OPT_MON_MAX6647 static efx_mon_ops_t __efx_mon_max6647_ops = { max6647_reset, /* emo_reset */ max6647_reconfigure, /* emo_reconfigure */ #if EFSYS_OPT_MON_STATS max6647_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MON_STATS */ }; #endif #if EFSYS_OPT_MON_MCDI static efx_mon_ops_t __efx_mon_mcdi_ops = { NULL, /* emo_reset */ NULL, /* emo_reconfigure */ #if EFSYS_OPT_MON_STATS mcdi_mon_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MON_STATS */ }; #endif static efx_mon_ops_t *__efx_mon_ops[] = { NULL, #if EFSYS_OPT_MON_NULL &__efx_mon_null_ops, #else NULL, #endif #if EFSYS_OPT_MON_LM87 &__efx_mon_lm87_ops, #else NULL, #endif #if EFSYS_OPT_MON_MAX6647 &__efx_mon_max6647_ops, #else NULL, #endif #if EFSYS_OPT_MON_MCDI &__efx_mon_mcdi_ops, #else NULL, #endif #if EFSYS_OPT_MON_MCDI &__efx_mon_mcdi_ops #else NULL #endif }; - __checkReturn int + __checkReturn efx_rc_t efx_mon_init( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mon_t *emp = &(enp->en_mon); efx_mon_ops_t *emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); if (enp->en_mod_flags & EFX_MOD_MON) { rc = EINVAL; goto fail1; } enp->en_mod_flags |= EFX_MOD_MON; emp->em_type = encp->enc_mon_type; EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID); EFSYS_ASSERT3U(emp->em_type, <, EFX_MON_NTYPES); if ((emop = (efx_mon_ops_t *)__efx_mon_ops[emp->em_type]) == NULL) { rc = ENOTSUP; goto fail2; } if (emop->emo_reset != NULL) { if ((rc = emop->emo_reset(enp)) != 0) goto fail3; } if (emop->emo_reconfigure != NULL) { if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail4; } emp->em_emop = emop; return (0); fail4: EFSYS_PROBE(fail5); if (emop->emo_reset != NULL) (void) emop->emo_reset(enp); fail3: EFSYS_PROBE(fail4); fail2: EFSYS_PROBE(fail3); emp->em_type = EFX_MON_INVALID; enp->en_mod_flags &= ~EFX_MOD_MON; fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_MON_STATS #if EFSYS_OPT_NAMES /* 
START MKCONFIG GENERATED MonitorStatNamesBlock b9328f15438c4d01 */ static const char *__mon_stat_name[] = { "value_2_5v", "value_vccp1", "value_vcc", "value_5v", "value_12v", "value_vccp2", "value_ext_temp", "value_int_temp", "value_ain1", "value_ain2", "controller_cooling", "ext_cooling", "1v", "1_2v", "1_8v", "3_3v", "1_2va", "vref", "vaoe", "aoe_temperature", "psu_aoe_temperature", "psu_temperature", "fan0", "fan1", "fan2", "fan3", "fan4", "vaoe_in", "iaoe", "iaoe_in", "nic_power", "0_9v", "i0_9v", "i1_2v", "0_9v_adc", "controller_temperature2", "vreg_temperature", "vreg_0_9v_temperature", "vreg_1_2v_temperature", "int_vptat", "controller_internal_adc_temperature", "ext_vptat", "controller_external_adc_temperature", "ambient_temperature", "airflow", "vdd08d_vss08d_csr", "vdd08d_vss08d_csr_extadc", "hotpoint_temperature", "phy_power_switch_port0", "phy_power_switch_port1", "mum_vcc", "0v9_a", "i0v9_a", "0v9_a_temp", "0v9_b", "i0v9_b", "0v9_b_temp", "ccom_avreg_1v2_supply", "ccom_avreg_1v2_supply_ext_adc", "ccom_avreg_1v8_supply", "ccom_avreg_1v8_supply_ext_adc", "controller_master_vptat", "controller_master_internal_temp", "controller_master_vptat_ext_adc", "controller_master_internal_temp_ext_adc", "controller_slave_vptat", "controller_slave_internal_temp", "controller_slave_vptat_ext_adc", "controller_slave_internal_temp_ext_adc", }; /* END MKCONFIG GENERATED MonitorStatNamesBlock */ extern const char * efx_mon_stat_name( __in efx_nic_t *enp, __in efx_mon_stat_t id) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS); return (__mon_stat_name[id]); } #endif /* EFSYS_OPT_NAMES */ - __checkReturn int + __checkReturn efx_rc_t efx_mon_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values) { efx_mon_t *emp = &(enp->en_mon); efx_mon_ops_t *emop = emp->em_emop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON); return (emop->emo_stats_update(enp, esmp, values)); } #endif /* EFSYS_OPT_MON_STATS */ void efx_mon_fini( __in efx_nic_t *enp) { efx_mon_t *emp = &(enp->en_mon); efx_mon_ops_t *emop = emp->em_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON); emp->em_emop = NULL; if (emop->emo_reset != NULL) { rc = emop->emo_reset(enp); if (rc != 0) - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); } emp->em_type = EFX_MON_INVALID; enp->en_mod_flags &= ~EFX_MOD_MON; } Index: stable/10/sys/dev/sfxge/common/efx_nic.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_nic.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_nic.c (revision 293927) @@ -1,1040 +1,1040 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" - __checkReturn int + __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, __out efx_family_t *efp) { if (venid == EFX_PCI_VENID_SFC) { switch (devid) { #if EFSYS_OPT_FALCON case EFX_PCI_DEVID_FALCON: *efp = EFX_FAMILY_FALCON; return (0); #endif #if EFSYS_OPT_SIENA case EFX_PCI_DEVID_SIENA_F1_UNINIT: /* * Hardware default for PF0 of uninitialised Siena. * manftest must be able to cope with this device id. */ *efp = EFX_FAMILY_SIENA; return (0); case EFX_PCI_DEVID_BETHPAGE: case EFX_PCI_DEVID_SIENA: *efp = EFX_FAMILY_SIENA; return (0); #endif #if EFSYS_OPT_HUNTINGTON case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT: /* * Hardware default for PF0 of uninitialised Huntington. * manftest must be able to cope with this device id. */ *efp = EFX_FAMILY_HUNTINGTON; return (0); case EFX_PCI_DEVID_FARMINGDALE: case EFX_PCI_DEVID_GREENPORT: case EFX_PCI_DEVID_HUNTINGTON: *efp = EFX_FAMILY_HUNTINGTON; return (0); case EFX_PCI_DEVID_FARMINGDALE_VF: case EFX_PCI_DEVID_GREENPORT_VF: case EFX_PCI_DEVID_HUNTINGTON_VF: *efp = EFX_FAMILY_HUNTINGTON; return (0); #endif default: break; } } *efp = EFX_FAMILY_INVALID; return (ENOTSUP); } /* * To support clients which aren't provided with any PCI context infer * the hardware family by inspecting the hardware. Obviously the caller * must be damn sure they're really talking to a supported device. 
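 *
 * A hedged caller sketch (have_pci_ids and the surrounding error
 * handling are hypothetical; efx_family() above and efx_infer_family()
 * below are the real entry points, both returning efx_rc_t after this
 * change):
 *
 *	efx_family_t family;
 *	efx_rc_t rc;
 *
 *	if (have_pci_ids)
 *		rc = efx_family(venid, devid, &family);
 *	else
 *		rc = efx_infer_family(esbp, &family);
 *	if (rc != 0)
 *		return (rc);	(ENOTSUP for unrecognised hardware)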
*/ - __checkReturn int + __checkReturn efx_rc_t efx_infer_family( __in efsys_bar_t *esbp, __out efx_family_t *efp) { efx_family_t family; efx_oword_t oword; unsigned int portnum; - int rc; + efx_rc_t rc; EFSYS_BAR_READO(esbp, FR_AZ_CS_DEBUG_REG_OFST, &oword, B_TRUE); portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM); switch (portnum) { case 0: { efx_dword_t dword; uint32_t hw_rev; EFSYS_BAR_READD(esbp, ER_DZ_BIU_HW_REV_ID_REG_OFST, &dword, B_TRUE); hw_rev = EFX_DWORD_FIELD(dword, ERF_DZ_HW_REV_ID); if (hw_rev == ER_DZ_BIU_HW_REV_ID_REG_RESET) { #if EFSYS_OPT_HUNTINGTON family = EFX_FAMILY_HUNTINGTON; break; #endif } else { #if EFSYS_OPT_FALCON family = EFX_FAMILY_FALCON; break; #endif } rc = ENOTSUP; goto fail1; } #if EFSYS_OPT_SIENA case 1: case 2: family = EFX_FAMILY_SIENA; break; #endif default: rc = ENOTSUP; goto fail1; } if (efp != NULL) *efp = family; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #define EFX_BIU_MAGIC0 0x01234567 #define EFX_BIU_MAGIC1 0xfedcba98 - __checkReturn int + __checkReturn efx_rc_t efx_nic_biu_test( __in efx_nic_t *enp) { efx_oword_t oword; - int rc; + efx_rc_t rc; /* * Write magic values to scratch registers 0 and 1, then * verify that the values were written correctly. Interleave * the accesses to ensure that the BIU is not just reading * back the cached value that was last written. */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0); EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) { rc = EIO; goto fail1; } EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) { rc = EIO; goto fail2; } /* * Perform the same test, with the values swapped. This * ensures that subsequent tests don't start with the correct * values already written into the scratch registers. 
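 *
 * Taken together, the two passes write the scratch registers as
 * (reg0, reg1) = (MAGIC0, MAGIC1) and then as (MAGIC1, MAGIC0), and
 * only read each register back after the other one has been written,
 * so a BIU that simply echoes the most recent write is detected.  Any
 * mismatch is reported as EIO through the fail1..fail4 paths below.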
*/ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0); EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) { rc = EIO; goto fail3; } EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) { rc = EIO; goto fail4; } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_FALCON static efx_nic_ops_t __efx_nic_falcon_ops = { falcon_nic_probe, /* eno_probe */ NULL, /* eno_set_drv_limits */ falcon_nic_reset, /* eno_reset */ falcon_nic_init, /* eno_init */ NULL, /* eno_get_vi_pool */ NULL, /* eno_get_bar_region */ #if EFSYS_OPT_DIAG falcon_sram_test, /* eno_sram_test */ falcon_nic_register_test, /* eno_register_test */ #endif /* EFSYS_OPT_DIAG */ falcon_nic_fini, /* eno_fini */ falcon_nic_unprobe, /* eno_unprobe */ }; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA static efx_nic_ops_t __efx_nic_siena_ops = { siena_nic_probe, /* eno_probe */ NULL, /* eno_set_drv_limits */ siena_nic_reset, /* eno_reset */ siena_nic_init, /* eno_init */ NULL, /* eno_get_vi_pool */ NULL, /* eno_get_bar_region */ #if EFSYS_OPT_DIAG siena_sram_test, /* eno_sram_test */ siena_nic_register_test, /* eno_register_test */ #endif /* EFSYS_OPT_DIAG */ siena_nic_fini, /* eno_fini */ siena_nic_unprobe, /* eno_unprobe */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_nic_ops_t __efx_nic_hunt_ops = { hunt_nic_probe, /* eno_probe */ hunt_nic_set_drv_limits, /* eno_set_drv_limits */ hunt_nic_reset, /* eno_reset */ hunt_nic_init, /* eno_init */ hunt_nic_get_vi_pool, /* eno_get_vi_pool */ hunt_nic_get_bar_region, /* eno_get_bar_region */ #if EFSYS_OPT_DIAG hunt_sram_test, /* eno_sram_test */ hunt_nic_register_test, /* eno_register_test */ #endif /* EFSYS_OPT_DIAG */ hunt_nic_fini, /* eno_fini */ hunt_nic_unprobe, /* eno_unprobe */ }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_nic_create( __in efx_family_t family, __in efsys_identifier_t *esip, __in efsys_bar_t *esbp, __in efsys_lock_t *eslp, __deref_out efx_nic_t **enpp) { efx_nic_t *enp; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID); EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES); /* Allocate a NIC object */ EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp); if (enp == NULL) { rc = ENOMEM; goto fail1; } enp->en_magic = EFX_NIC_MAGIC; switch (family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: enp->en_enop = (efx_nic_ops_t *)&__efx_nic_falcon_ops; enp->en_features = 0; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: enp->en_enop = (efx_nic_ops_t *)&__efx_nic_siena_ops; enp->en_features = EFX_FEATURE_IPV6 | EFX_FEATURE_LFSR_HASH_INSERT | EFX_FEATURE_LINK_EVENTS | EFX_FEATURE_PERIODIC_MAC_STATS | EFX_FEATURE_WOL | EFX_FEATURE_MCDI | EFX_FEATURE_LOOKAHEAD_SPLIT | EFX_FEATURE_MAC_HEADER_FILTERS | EFX_FEATURE_TX_SRC_FILTERS; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: enp->en_enop = (efx_nic_ops_t *)&__efx_nic_hunt_ops; /* FIXME: Add WOL support */ enp->en_features = EFX_FEATURE_IPV6 | EFX_FEATURE_LINK_EVENTS | EFX_FEATURE_PERIODIC_MAC_STATS | 
EFX_FEATURE_MCDI | EFX_FEATURE_MAC_HEADER_FILTERS | EFX_FEATURE_MCDI_DMA | EFX_FEATURE_PIO_BUFFERS | EFX_FEATURE_FW_ASSISTED_TSO; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: rc = ENOTSUP; goto fail2; } enp->en_family = family; enp->en_esip = esip; enp->en_esbp = esbp; enp->en_eslp = eslp; *enpp = enp; return (0); fail2: EFSYS_PROBE(fail2); enp->en_magic = 0; /* Free the NIC object */ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nic_probe( __in efx_nic_t *enp) { efx_nic_ops_t *enop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); #if EFSYS_OPT_MCDI EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); #endif /* EFSYS_OPT_MCDI */ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE)); enop = enp->en_enop; if ((rc = enop->eno_probe(enp)) != 0) goto fail1; if ((rc = efx_phy_probe(enp)) != 0) goto fail2; enp->en_mod_flags |= EFX_MOD_PROBE; return (0); fail2: EFSYS_PROBE(fail2); enop->eno_unprobe(enp); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_PCIE_TUNE - __checkReturn int + __checkReturn efx_rc_t efx_nic_pcie_tune( __in efx_nic_t *enp, unsigned int nlanes) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); #if EFSYS_OPT_FALCON if (enp->en_family == EFX_FAMILY_FALCON) return (falcon_nic_pcie_tune(enp, nlanes)); #endif return (ENOTSUP); } - __checkReturn int + __checkReturn efx_rc_t efx_nic_pcie_extended_sync( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); #if EFSYS_OPT_SIENA if (enp->en_family == EFX_FAMILY_SIENA) return (siena_nic_pcie_extended_sync(enp)); #endif return (ENOTSUP); } #endif /* EFSYS_OPT_PCIE_TUNE */ - __checkReturn int + __checkReturn efx_rc_t efx_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp) { efx_nic_ops_t *enop = enp->en_enop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); if (enop->eno_set_drv_limits != NULL) { if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0) goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep) { efx_nic_ops_t *enop = enp->en_enop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (enop->eno_get_bar_region == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = (enop->eno_get_bar_region)(enp, region, offsetp, sizep)) != 0) { goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *evq_countp, __out uint32_t *rxq_countp, __out uint32_t *txq_countp) { efx_nic_ops_t *enop = enp->en_enop; efx_nic_cfg_t *encp = &enp->en_nic_cfg; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); 
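	/* Both EFX_MOD_PROBE and EFX_MOD_NIC must be set before the VI pool is queried */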
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (enop->eno_get_vi_pool != NULL) { uint32_t vi_count = 0; if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0) goto fail1; *evq_countp = vi_count; *rxq_countp = vi_count; *txq_countp = vi_count; } else { /* Use NIC limits as default value */ *evq_countp = encp->enc_evq_limit; *rxq_countp = encp->enc_rxq_limit; *txq_countp = encp->enc_txq_limit; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nic_init( __in efx_nic_t *enp) { efx_nic_ops_t *enop = enp->en_enop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); if (enp->en_mod_flags & EFX_MOD_NIC) { rc = EINVAL; goto fail1; } if ((rc = enop->eno_init(enp)) != 0) goto fail2; enp->en_mod_flags |= EFX_MOD_NIC; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_nic_fini( __in efx_nic_t *enp) { efx_nic_ops_t *enop = enp->en_enop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE); EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); enop->eno_fini(enp); enp->en_mod_flags &= ~EFX_MOD_NIC; } void efx_nic_unprobe( __in efx_nic_t *enp) { efx_nic_ops_t *enop = enp->en_enop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); #if EFSYS_OPT_MCDI EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); #endif /* EFSYS_OPT_MCDI */ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); efx_phy_unprobe(enp); enop->eno_unprobe(enp); enp->en_mod_flags &= ~EFX_MOD_PROBE; } void efx_nic_destroy( __in efx_nic_t *enp) { efsys_identifier_t *esip = enp->en_esip; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); enp->en_family = 0; enp->en_esip = NULL; enp->en_esbp = NULL; enp->en_eslp = NULL; enp->en_enop = NULL; enp->en_magic = 0; /* Free the NIC object */ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp); } - __checkReturn int + __checkReturn efx_rc_t efx_nic_reset( __in efx_nic_t *enp) { efx_nic_ops_t *enop = enp->en_enop; unsigned int mod_flags; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE); /* * All modules except the MCDI, PROBE, NVRAM, VPD, MON (which we * do not reset here) must have been shut down or never initialized. * * A rule of thumb here is: If the controller or MC reboots, is *any* * state lost. If it's lost and needs reapplying, then the module * *must* not be initialised during the reset. 
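 *
 * A hedged sketch of the ordering this implies for a caller tearing the
 * NIC down before a reset (efx_nic_fini() is defined above; the other
 * *_fini() calls are assumed common-code entry points, not shown in
 * this file):
 *
 *	efx_tx_fini(enp);
 *	efx_rx_fini(enp);
 *	efx_ev_fini(enp);
 *	efx_intr_fini(enp);
 *	efx_nic_fini(enp);
 *	if ((rc = efx_nic_reset(enp)) != 0)
 *		(handle the failure)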
*/ mod_flags = enp->en_mod_flags; mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM | EFX_MOD_VPD | EFX_MOD_MON); EFSYS_ASSERT3U(mod_flags, ==, 0); if (mod_flags != 0) { rc = EINVAL; goto fail1; } if ((rc = enop->eno_reset(enp)) != 0) goto fail2; enp->en_reset_flags |= EFX_RESET_MAC; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } const efx_nic_cfg_t * efx_nic_cfg_get( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); return (&(enp->en_nic_cfg)); } #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t efx_nic_register_test( __in efx_nic_t *enp) { efx_nic_ops_t *enop = enp->en_enop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); if ((rc = enop->eno_register_test(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nic_test_registers( __in efx_nic_t *enp, __in efx_register_set_t *rsp, __in size_t count) { unsigned int bit; efx_oword_t original; efx_oword_t reg; efx_oword_t buf; - int rc; + efx_rc_t rc; while (count > 0) { /* This function is only suitable for registers */ EFSYS_ASSERT(rsp->rows == 1); /* bit sweep on and off */ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original, B_TRUE); for (bit = 0; bit < 128; bit++) { /* Is this bit in the mask? */ if (~(rsp->mask.eo_u32[bit >> 5]) & (1 << bit)) continue; /* Test this bit can be set in isolation */ reg = original; EFX_AND_OWORD(reg, rsp->mask); EFX_SET_OWORD_BIT(reg, bit); EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, ®, B_TRUE); EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf, B_TRUE); EFX_AND_OWORD(buf, rsp->mask); if (memcmp(®, &buf, sizeof (reg))) { rc = EIO; goto fail1; } /* Test this bit can be cleared in isolation */ EFX_OR_OWORD(reg, rsp->mask); EFX_CLEAR_OWORD_BIT(reg, bit); EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, ®, B_TRUE); EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf, B_TRUE); EFX_AND_OWORD(buf, rsp->mask); if (memcmp(®, &buf, sizeof (reg))) { rc = EIO; goto fail2; } } /* Restore the old value */ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE); --count; ++rsp; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Restore the old value */ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nic_test_tables( __in efx_nic_t *enp, __in efx_register_set_t *rsp, __in efx_pattern_type_t pattern, __in size_t count) { efx_sram_pattern_fn_t func; unsigned int index; unsigned int address; efx_oword_t reg; efx_oword_t buf; - int rc; + efx_rc_t rc; EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES); func = __efx_sram_pattern_fns[pattern]; while (count > 0) { /* Write */ address = rsp->address; for (index = 0; index < rsp->rows; ++index) { func(2 * index + 0, B_FALSE, ®.eo_qword[0]); func(2 * index + 1, B_FALSE, ®.eo_qword[1]); EFX_AND_OWORD(reg, rsp->mask); EFSYS_BAR_WRITEO(enp->en_esbp, address, ®, B_TRUE); address += rsp->step; } /* Read */ address = rsp->address; for (index = 0; index < rsp->rows; ++index) { func(2 * index + 0, B_FALSE, ®.eo_qword[0]); func(2 * index + 1, B_FALSE, ®.eo_qword[1]); EFX_AND_OWORD(reg, rsp->mask); EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE); if (memcmp(®, &buf, sizeof 
(reg))) { rc = EIO; goto fail1; } address += rsp->step; } ++rsp; --count; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ #if EFSYS_OPT_LOOPBACK extern void efx_loopback_mask( __in efx_loopback_kind_t loopback_kind, __out efx_qword_t *maskp) { efx_qword_t mask; EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS); EFSYS_ASSERT(maskp != NULL); /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR == EFX_LOOPBACK_XAUI_WS_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR == EFX_LOOPBACK_XAUI_WS_NEAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR == EFX_LOOPBACK_XFI_WS_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS == EFX_LOOPBACK_PMA_INT_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS == EFX_LOOPBACK_SD_FEP2_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS == EFX_LOOPBACK_SD_FEP1_5_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS); /* Build bitmask of possible loopback types */ EFX_ZERO_QWORD(mask); if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) || (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF); } if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) || (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { /* * The "MAC" grouping has historically been used by drivers to * mean loopbacks supported by on-chip hardware. Keep that * meaning here, and include on-chip PHY layer loopbacks. 
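 *
 * Illustrative use of the resulting mask (EFX_TEST_QWORD_BIT is assumed
 * to be one of the efx_types.h QWORD accessors; the check only asks
 * which grouping a loopback type belongs to):
 *
 *	efx_qword_t mask;
 *
 *	efx_loopback_mask(EFX_LOOPBACK_KIND_MAC, &mask);
 *	if (EFX_TEST_QWORD_BIT(mask, EFX_LOOPBACK_XGMII))
 *		(EFX_LOOPBACK_XGMII is classified as a MAC loopback)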
*/ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR); } if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) || (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { /* * The "PHY" grouping has historically been used by drivers to * mean loopbacks supported by off-chip hardware. Keep that * meaning here. */ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD); } *maskp = mask; } -__checkReturn int + __checkReturn efx_rc_t efx_mcdi_get_loopback_modes( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)]; efx_qword_t mask; efx_qword_t modes; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) { rc = EMSGSIZE; goto fail2; } /* * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link(). 
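 *
 * Below, the set of loopbacks the driver knows about is first
 * restricted to the firmware's SUGGESTED modes; each per-speed field
 * from the response is then filtered through that mask and stored in
 * enc_loopback_types[], and the union across all speeds (always
 * including EFX_LOOPBACK_OFF) is kept under EFX_LINK_UNKNOWN.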
*/ efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask); EFX_AND_QWORD(mask, *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED)); modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_100FDX] = modes; modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_1000FDX] = modes; modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_10000FDX] = modes; if (req.emr_out_length_used >= MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST + MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) { /* Response includes 40G loopback modes */ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_40000FDX] = modes; } EFX_ZERO_QWORD(modes); EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]); encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_LOOPBACK */ Index: stable/10/sys/dev/sfxge/common/efx_nvram.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_nvram.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_nvram.c (revision 293927) @@ -1,887 +1,887 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_NVRAM #if EFSYS_OPT_FALCON static efx_nvram_ops_t __efx_nvram_falcon_ops = { #if EFSYS_OPT_DIAG falcon_nvram_test, /* envo_test */ #endif /* EFSYS_OPT_DIAG */ falcon_nvram_size, /* envo_size */ falcon_nvram_get_version, /* envo_get_version */ falcon_nvram_rw_start, /* envo_rw_start */ falcon_nvram_read_chunk, /* envo_read_chunk */ falcon_nvram_erase, /* envo_erase */ falcon_nvram_write_chunk, /* envo_write_chunk */ falcon_nvram_rw_finish, /* envo_rw_finish */ falcon_nvram_set_version, /* envo_set_version */ }; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA static efx_nvram_ops_t __efx_nvram_siena_ops = { #if EFSYS_OPT_DIAG siena_nvram_test, /* envo_test */ #endif /* EFSYS_OPT_DIAG */ siena_nvram_size, /* envo_size */ siena_nvram_get_version, /* envo_get_version */ siena_nvram_rw_start, /* envo_rw_start */ siena_nvram_read_chunk, /* envo_read_chunk */ siena_nvram_erase, /* envo_erase */ siena_nvram_write_chunk, /* envo_write_chunk */ siena_nvram_rw_finish, /* envo_rw_finish */ siena_nvram_set_version, /* envo_set_version */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_nvram_ops_t __efx_nvram_hunt_ops = { #if EFSYS_OPT_DIAG hunt_nvram_test, /* envo_test */ #endif /* EFSYS_OPT_DIAG */ hunt_nvram_size, /* envo_size */ hunt_nvram_get_version, /* envo_get_version */ hunt_nvram_rw_start, /* envo_rw_start */ hunt_nvram_read_chunk, /* envo_read_chunk */ hunt_nvram_erase, /* envo_erase */ hunt_nvram_write_chunk, /* envo_write_chunk */ hunt_nvram_rw_finish, /* envo_rw_finish */ hunt_nvram_set_version, /* envo_set_version */ }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_nvram_init( __in efx_nic_t *enp) { efx_nvram_ops_t *envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM)); switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: envop = (efx_nvram_ops_t *)&__efx_nvram_falcon_ops; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: envop = (efx_nvram_ops_t *)&__efx_nvram_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: envop = (efx_nvram_ops_t *)&__efx_nvram_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } enp->en_envop = envop; enp->en_mod_flags |= EFX_MOD_NVRAM; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t efx_nvram_test( __in efx_nic_t *enp) { efx_nvram_ops_t *envop = enp->en_envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); if ((rc = envop->envo_test(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ - __checkReturn int + __checkReturn efx_rc_t efx_nvram_size( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *sizep) { efx_nvram_ops_t *envop = enp->en_envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); if ((rc = envop->envo_size(enp, type, sizep)) != 0) goto fail1; 
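	/*
	 * As throughout this file, failures unwind through numbered fail
	 * labels; fail1 now records the efx_rc_t value via EFSYS_PROBE1()
	 * before it is returned to the caller.
	 */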
return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nvram_get_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]) { efx_nvram_ops_t *envop = enp->en_envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); if ((rc = envop->envo_get_version(enp, type, subtypep, version)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nvram_rw_start( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out_opt size_t *chunk_sizep) { efx_nvram_ops_t *envop = enp->en_envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID); EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID); if ((rc = envop->envo_rw_start(enp, type, chunk_sizep)) != 0) goto fail1; enp->en_nvram_locked = type; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nvram_read_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { efx_nvram_ops_t *envop = enp->en_envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID); EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type); if ((rc = envop->envo_read_chunk(enp, type, offset, data, size)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nvram_erase( __in efx_nic_t *enp, __in efx_nvram_type_t type) { efx_nvram_ops_t *envop = enp->en_envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID); EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type); if ((rc = envop->envo_erase(enp, type)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_nvram_write_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __in_bcount(size) caddr_t data, __in size_t size) { efx_nvram_ops_t *envop = enp->en_envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID); EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type); if ((rc = envop->envo_write_chunk(enp, type, offset, data, size)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_nvram_rw_finish( __in efx_nic_t *enp, __in efx_nvram_type_t type) { efx_nvram_ops_t *envop = enp->en_envop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, 
&, EFX_MOD_NVRAM); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID); EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type); envop->envo_rw_finish(enp, type); enp->en_nvram_locked = EFX_NVRAM_INVALID; } - __checkReturn int + __checkReturn efx_rc_t efx_nvram_set_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_ecount(4) uint16_t version[4]) { efx_nvram_ops_t *envop = enp->en_envop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); /* * The Siena implementation of envo_set_version() will attempt to * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG sector. * Therefore, you can't have already acquired the NVRAM_UPDATE lock. */ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID); if ((rc = envop->envo_set_version(enp, type, version)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_nvram_fini( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID); enp->en_envop = NULL; enp->en_mod_flags &= ~EFX_MOD_NVRAM; } #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD /* * Internal MCDI request handling */ - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_partitions( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size, __out unsigned int *npartnp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_PARTITIONS_IN_LEN, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX)]; unsigned int npartn; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_PARTITIONS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } npartn = MCDI_OUT_DWORD(req, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(npartn)) { rc = ENOENT; goto fail3; } if (size < npartn * sizeof (uint32_t)) { rc = ENOSPC; goto fail3; } *npartnp = npartn; memcpy(data, MCDI_OUT2(req, uint32_t, NVRAM_PARTITIONS_OUT_TYPE_ID), (npartn * sizeof (uint32_t))); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_metadata( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4], __out_bcount_opt(size) char *descp, __in size_t size) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_METADATA_IN_LEN, MC_CMD_NVRAM_METADATA_OUT_LENMAX)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_METADATA; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_METADATA_OUT_LENMAX; MCDI_IN_SET_DWORD(req, NVRAM_METADATA_IN_TYPE, partn); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used 
< MC_CMD_NVRAM_METADATA_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS, NVRAM_METADATA_OUT_SUBTYPE_VALID)) { *subtypep = MCDI_OUT_DWORD(req, NVRAM_METADATA_OUT_SUBTYPE); } else { *subtypep = 0; } if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS, NVRAM_METADATA_OUT_VERSION_VALID)) { version[0] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_W); version[1] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_X); version[2] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Y); version[3] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Z); } else { version[0] = version[1] = version[2] = version[3] = 0; } if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS, NVRAM_METADATA_OUT_DESCRIPTION_VALID)) { /* Return optional descrition string */ if ((descp != NULL) && (size > 0)) { size_t desclen; descp[0] = '\0'; desclen = (req.emr_out_length_used - MC_CMD_NVRAM_METADATA_OUT_LEN(0)); EFSYS_ASSERT3U(desclen, <=, MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM); if (size < desclen) { rc = ENOSPC; goto fail3; } memcpy(descp, MCDI_OUT2(req, char, NVRAM_METADATA_OUT_DESCRIPTION), desclen); /* Ensure string is NUL terminated */ descp[desclen] = '\0'; } } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_info( __in efx_nic_t *enp, __in uint32_t partn, __out_opt size_t *sizep, __out_opt uint32_t *addressp, __out_opt uint32_t *erase_sizep) { uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN, MC_CMD_NVRAM_INFO_OUT_LEN)]; efx_mcdi_req_t req; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_INFO_OUT_LEN; MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (sizep) *sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE); if (addressp) *addressp = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_PHYSADDR); if (erase_sizep) *erase_sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_ERASESIZE); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_update_start( __in efx_nic_t *enp, __in uint32_t partn) { uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_START_IN_LEN, MC_CMD_NVRAM_UPDATE_START_OUT_LEN)]; efx_mcdi_req_t req; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_UPDATE_START; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_UPDATE_START_OUT_LEN; MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_IN_TYPE, partn); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_read( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __out_bcount(size) caddr_t data, __in size_t size) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_LEN, MC_CMD_NVRAM_READ_OUT_LENMAX)]; - int rc; + efx_rc_t rc; if (size > 
MC_CMD_NVRAM_READ_OUT_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_READ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_READ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_READ_OUT_LENMAX; MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_TYPE, partn); MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_OFFSET, offset); MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_LENGTH, size); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_NVRAM_READ_OUT_LEN(size)) { rc = EMSGSIZE; goto fail2; } memcpy(data, MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER), size); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_erase( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __in size_t size) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_ERASE_IN_LEN, MC_CMD_NVRAM_ERASE_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_ERASE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_ERASE_OUT_LEN; MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn); MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset); MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_write( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __out_bcount(size) caddr_t data, __in size_t size) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_WRITE_IN_LENMAX, MC_CMD_NVRAM_WRITE_OUT_LEN)]; - int rc; + efx_rc_t rc; if (size > MC_CMD_NVRAM_WRITE_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_WRITE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_WRITE_OUT_LEN; MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn); MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset); MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, size); memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER), data, size); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_update_finish( __in efx_nic_t *enp, __in uint32_t partn, __in boolean_t reboot) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN, MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN; MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_IN_TYPE, partn); MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_IN_REBOOT, reboot); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, 
rc); return (rc); } #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t efx_mcdi_nvram_test( __in efx_nic_t *enp, __in uint32_t partn) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN, MC_CMD_NVRAM_TEST_OUT_LEN)]; int result; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_TEST; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN; MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, partn); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) { rc = EMSGSIZE; goto fail2; } result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT); if (result == MC_CMD_NVRAM_TEST_FAIL) { EFSYS_PROBE1(nvram_test_failure, int, partn); rc = (EINVAL); goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ Index: stable/10/sys/dev/sfxge/common/efx_phy.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_phy.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_phy.c (revision 293927) @@ -1,839 +1,839 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_FALCON #include "falcon_nvram.h" #endif #if EFSYS_OPT_MAC_FALCON_XMAC #include "falcon_xmac.h" #endif #if EFSYS_OPT_MAC_FALCON_GMAC #include "falcon_gmac.h" #endif #if EFSYS_OPT_PHY_NULL #include "nullphy.h" #endif #if EFSYS_OPT_PHY_QT2022C2 #include "qt2022c2.h" #endif #if EFSYS_OPT_PHY_SFX7101 #include "sfx7101.h" #endif #if EFSYS_OPT_PHY_TXC43128 #include "txc43128.h" #endif #if EFSYS_OPT_PHY_SFT9001 #include "sft9001.h" #endif #if EFSYS_OPT_PHY_QT2025C #include "qt2025c.h" #endif #if EFSYS_OPT_PHY_NULL static efx_phy_ops_t __efx_phy_null_ops = { NULL, /* epo_power */ nullphy_reset, /* epo_reset */ nullphy_reconfigure, /* epo_reconfigure */ nullphy_verify, /* epo_verify */ NULL, /* epo_uplink_check */ nullphy_downlink_check, /* epo_downlink_check */ nullphy_oui_get, /* epo_oui_get */ #if EFSYS_OPT_PHY_STATS nullphy_stats_update, /* epo_stats_update */ #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES nullphy_prop_name, /* epo_prop_name */ #endif nullphy_prop_get, /* epo_prop_get */ nullphy_prop_set, /* epo_prop_set */ #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST NULL, /* epo_bist_enable_offline */ NULL, /* epo_bist_start */ NULL, /* epo_bist_poll */ NULL, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; #endif /* EFSYS_OPT_PHY_NULL */ #if EFSYS_OPT_PHY_QT2022C2 static efx_phy_ops_t __efx_phy_qt2022c2_ops = { NULL, /* epo_power */ qt2022c2_reset, /* epo_reset */ qt2022c2_reconfigure, /* epo_reconfigure */ qt2022c2_verify, /* epo_verify */ qt2022c2_uplink_check, /* epo_uplink_check */ qt2022c2_downlink_check, /* epo_downlink_check */ qt2022c2_oui_get, /* epo_oui_get */ #if EFSYS_OPT_PHY_STATS qt2022c2_stats_update, /* epo_stats_update */ #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES qt2022c2_prop_name, /* epo_prop_name */ #endif qt2022c2_prop_get, /* epo_prop_get */ qt2022c2_prop_set, /* epo_prop_set */ #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST NULL, /* epo_bist_enable_offline */ NULL, /* epo_bist_start */ NULL, /* epo_bist_poll */ NULL, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; #endif /* EFSYS_OPT_PHY_QT2022C2 */ #if EFSYS_OPT_PHY_SFX7101 static efx_phy_ops_t __efx_phy_sfx7101_ops = { sfx7101_power, /* epo_power */ sfx7101_reset, /* epo_reset */ sfx7101_reconfigure, /* epo_reconfigure */ sfx7101_verify, /* epo_verify */ sfx7101_uplink_check, /* epo_uplink_check */ sfx7101_downlink_check, /* epo_downlink_check */ sfx7101_oui_get, /* epo_oui_get */ #if EFSYS_OPT_PHY_STATS sfx7101_stats_update, /* epo_stats_update */ #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES sfx7101_prop_name, /* epo_prop_name */ #endif sfx7101_prop_get, /* epo_prop_get */ sfx7101_prop_set, /* epo_prop_set */ #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST NULL, /* epo_bist_enable_offline */ NULL, /* epo_bist_start */ NULL, /* epo_bist_poll */ NULL, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; #endif /* EFSYS_OPT_PHY_SFX7101 */ #if EFSYS_OPT_PHY_TXC43128 static efx_phy_ops_t __efx_phy_txc43128_ops = { NULL, /* epo_power */ txc43128_reset, /* epo_reset */ txc43128_reconfigure, /* epo_reconfigure */ txc43128_verify, /* epo_verify */ txc43128_uplink_check, /* epo_uplink_check */ txc43128_downlink_check, /* epo_downlink_check */ txc43128_oui_get, /* epo_oui_get */ #if EFSYS_OPT_PHY_STATS txc43128_stats_update, /* epo_stats_update */ #endif /* 
EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES txc43128_prop_name, /* epo_prop_name */ #endif txc43128_prop_get, /* epo_prop_get */ txc43128_prop_set, /* epo_prop_set */ #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST NULL, /* epo_bist_enable_offline */ NULL, /* epo_bist_start */ NULL, /* epo_bist_poll */ NULL, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; #endif /* EFSYS_OPT_PHY_TXC43128 */ #if EFSYS_OPT_PHY_SFT9001 static efx_phy_ops_t __efx_phy_sft9001_ops = { NULL, /* epo_power */ sft9001_reset, /* epo_reset */ sft9001_reconfigure, /* epo_reconfigure */ sft9001_verify, /* epo_verify */ sft9001_uplink_check, /* epo_uplink_check */ sft9001_downlink_check, /* epo_downlink_check */ sft9001_oui_get, /* epo_oui_get */ #if EFSYS_OPT_PHY_STATS sft9001_stats_update, /* epo_stats_update */ #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES sft9001_prop_name, /* epo_prop_name */ #endif sft9001_prop_get, /* epo_prop_get */ sft9001_prop_set, /* epo_prop_set */ #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST NULL, /* epo_bist_enable_offline */ sft9001_bist_start, /* epo_bist_start */ sft9001_bist_poll, /* epo_bist_poll */ sft9001_bist_stop, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; #endif /* EFSYS_OPT_PHY_SFT9001 */ #if EFSYS_OPT_PHY_QT2025C static efx_phy_ops_t __efx_phy_qt2025c_ops = { NULL, /* epo_power */ qt2025c_reset, /* epo_reset */ qt2025c_reconfigure, /* epo_reconfigure */ qt2025c_verify, /* epo_verify */ qt2025c_uplink_check, /* epo_uplink_check */ qt2025c_downlink_check, /* epo_downlink_check */ qt2025c_oui_get, /* epo_oui_get */ #if EFSYS_OPT_PHY_STATS qt2025c_stats_update, /* epo_stats_update */ #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES qt2025c_prop_name, /* epo_prop_name */ #endif qt2025c_prop_get, /* epo_prop_get */ qt2025c_prop_set, /* epo_prop_set */ #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST NULL, /* epo_bist_enable_offline */ NULL, /* epo_bist_start */ NULL, /* epo_bist_poll */ NULL, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; #endif /* EFSYS_OPT_PHY_QT2025C */ #if EFSYS_OPT_SIENA static efx_phy_ops_t __efx_phy_siena_ops = { siena_phy_power, /* epo_power */ NULL, /* epo_reset */ siena_phy_reconfigure, /* epo_reconfigure */ siena_phy_verify, /* epo_verify */ NULL, /* epo_uplink_check */ NULL, /* epo_downlink_check */ siena_phy_oui_get, /* epo_oui_get */ #if EFSYS_OPT_PHY_STATS siena_phy_stats_update, /* epo_stats_update */ #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES siena_phy_prop_name, /* epo_prop_name */ #endif siena_phy_prop_get, /* epo_prop_get */ siena_phy_prop_set, /* epo_prop_set */ #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST NULL, /* epo_bist_enable_offline */ siena_phy_bist_start, /* epo_bist_start */ siena_phy_bist_poll, /* epo_bist_poll */ siena_phy_bist_stop, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_phy_ops_t __efx_phy_hunt_ops = { hunt_phy_power, /* epo_power */ NULL, /* epo_reset */ hunt_phy_reconfigure, /* epo_reconfigure */ hunt_phy_verify, /* epo_verify */ NULL, /* epo_uplink_check */ NULL, /* epo_downlink_check */ hunt_phy_oui_get, /* epo_oui_get */ #if EFSYS_OPT_PHY_STATS hunt_phy_stats_update, /* epo_stats_update */ #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES hunt_phy_prop_name, /* epo_prop_name */ #endif hunt_phy_prop_get, /* epo_prop_get */ hunt_phy_prop_set, /* epo_prop_set */ #endif /* 
EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST hunt_bist_enable_offline, /* epo_bist_enable_offline */ hunt_bist_start, /* epo_bist_start */ hunt_bist_poll, /* epo_bist_poll */ hunt_bist_stop, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_phy_probe( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_phy_ops_t *epop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); epp->ep_port = encp->enc_port; epp->ep_phy_type = encp->enc_phy_type; /* Hook in operations structure */ switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: switch (epp->ep_phy_type) { #if EFSYS_OPT_PHY_NULL case PHY_TYPE_NONE_DECODE: epop = (efx_phy_ops_t *)&__efx_phy_null_ops; break; #endif #if EFSYS_OPT_PHY_QT2022C2 case PHY_TYPE_QT2022C2_DECODE: epop = (efx_phy_ops_t *)&__efx_phy_qt2022c2_ops; break; #endif #if EFSYS_OPT_PHY_SFX7101 case PHY_TYPE_SFX7101_DECODE: epop = (efx_phy_ops_t *)&__efx_phy_sfx7101_ops; break; #endif #if EFSYS_OPT_PHY_TXC43128 case PHY_TYPE_TXC43128_DECODE: epop = (efx_phy_ops_t *)&__efx_phy_txc43128_ops; break; #endif #if EFSYS_OPT_PHY_SFT9001 case PHY_TYPE_SFT9001A_DECODE: case PHY_TYPE_SFT9001B_DECODE: epop = (efx_phy_ops_t *)&__efx_phy_sft9001_ops; break; #endif #if EFSYS_OPT_PHY_QT2025C case EFX_PHY_QT2025C: epop = (efx_phy_ops_t *)&__efx_phy_qt2025c_ops; break; #endif default: rc = ENOTSUP; goto fail1; } break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: epop = (efx_phy_ops_t *)&__efx_phy_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: epop = (efx_phy_ops_t *)&__efx_phy_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: rc = ENOTSUP; goto fail1; } epp->ep_epop = epop; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); epp->ep_port = 0; epp->ep_phy_type = 0; return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_phy_verify( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); return (epop->epo_verify(enp)); } #if EFSYS_OPT_PHY_LED_CONTROL - __checkReturn int + __checkReturn efx_rc_t efx_phy_led_set( __in efx_nic_t *enp, __in efx_phy_led_mode_t mode) { efx_nic_cfg_t *encp = (&enp->en_nic_cfg); efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; uint32_t mask; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (epp->ep_phy_led_mode == mode) goto done; mask = (1 << EFX_PHY_LED_DEFAULT); mask |= encp->enc_led_mask; if (!((1 << mode) & mask)) { rc = ENOTSUP; goto fail1; } EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES); epp->ep_phy_led_mode = mode; if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail2; done: return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_PHY_LED_CONTROL */ void efx_phy_adv_cap_get( __in efx_nic_t *enp, __in uint32_t flag, __out uint32_t *maskp) { efx_port_t *epp = &(enp->en_port); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); switch (flag) { case EFX_PHY_CAP_CURRENT: *maskp = epp->ep_adv_cap_mask; break; case EFX_PHY_CAP_DEFAULT: *maskp = epp->ep_default_adv_cap_mask; break; case 
EFX_PHY_CAP_PERM: *maskp = epp->ep_phy_cap_mask; break; default: EFSYS_ASSERT(B_FALSE); break; } } - __checkReturn int + __checkReturn efx_rc_t efx_phy_adv_cap_set( __in efx_nic_t *enp, __in uint32_t mask) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; uint32_t old_mask; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if ((mask & ~epp->ep_phy_cap_mask) != 0) { rc = ENOTSUP; goto fail1; } if (epp->ep_adv_cap_mask == mask) goto done; old_mask = epp->ep_adv_cap_mask; epp->ep_adv_cap_mask = mask; if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail2; done: return (0); fail2: EFSYS_PROBE(fail2); epp->ep_adv_cap_mask = old_mask; /* Reconfigure for robustness */ if (epop->epo_reconfigure(enp) != 0) { /* * We may have an inconsistent view of our advertised speed * capabilities. */ EFSYS_ASSERT(0); } fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_phy_lp_cap_get( __in efx_nic_t *enp, __out uint32_t *maskp) { efx_port_t *epp = &(enp->en_port); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); *maskp = epp->ep_lp_cap_mask; } - __checkReturn int + __checkReturn efx_rc_t efx_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); return (epop->epo_oui_get(enp, ouip)); } void efx_phy_media_type_get( __in efx_nic_t *enp, __out efx_phy_media_type_t *typep) { efx_port_t *epp = &(enp->en_port); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (epp->ep_module_type != EFX_PHY_MEDIA_INVALID) *typep = epp->ep_module_type; else *typep = epp->ep_fixed_port_type; } #if EFSYS_OPT_PHY_STATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED PhyStatNamesBlock d5f79b4bc2c050fe */ static const char *__efx_phy_stat_name[] = { "oui", "pma_pmd_link_up", "pma_pmd_rx_fault", "pma_pmd_tx_fault", "pma_pmd_rev_a", "pma_pmd_rev_b", "pma_pmd_rev_c", "pma_pmd_rev_d", "pcs_link_up", "pcs_rx_fault", "pcs_tx_fault", "pcs_ber", "pcs_block_errors", "phy_xs_link_up", "phy_xs_rx_fault", "phy_xs_tx_fault", "phy_xs_align", "phy_xs_sync_a", "phy_xs_sync_b", "phy_xs_sync_c", "phy_xs_sync_d", "an_link_up", "an_master", "an_local_rx_ok", "an_remote_rx_ok", "cl22ext_link_up", "snr_a", "snr_b", "snr_c", "snr_d", "pma_pmd_signal_a", "pma_pmd_signal_b", "pma_pmd_signal_c", "pma_pmd_signal_d", "an_complete", "pma_pmd_rev_major", "pma_pmd_rev_minor", "pma_pmd_rev_micro", "pcs_fw_version_0", "pcs_fw_version_1", "pcs_fw_version_2", "pcs_fw_version_3", "pcs_fw_build_yy", "pcs_fw_build_mm", "pcs_fw_build_dd", "pcs_op_mode", }; /* END MKCONFIG GENERATED PhyStatNamesBlock */ const char * efx_phy_stat_name( __in efx_nic_t *enp, __in efx_phy_stat_t type) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS); return (__efx_phy_stat_name[type]); } #endif /* EFSYS_OPT_NAMES */ - __checkReturn int + __checkReturn efx_rc_t efx_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); return (epop->epo_stats_update(enp, esmp, stat)); } #endif /* 
EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES const char * efx_phy_prop_name( __in efx_nic_t *enp, __in unsigned int id) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); return (epop->epo_prop_name(enp, id)); } #endif /* EFSYS_OPT_NAMES */ - __checkReturn int + __checkReturn efx_rc_t efx_phy_prop_get( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t flags, __out uint32_t *valp) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); return (epop->epo_prop_get(enp, id, flags, valp)); } - __checkReturn int + __checkReturn efx_rc_t efx_phy_prop_set( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t val) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); return (epop->epo_prop_set(enp, id, val)); } #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST - __checkReturn int + __checkReturn efx_rc_t efx_bist_enable_offline( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); if (epop->epo_bist_enable_offline == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = epop->epo_bist_enable_offline(enp)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN); EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES); EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_BIST_TYPE_UNKNOWN); if (epop->epo_bist_start == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = epop->epo_bist_start(enp, type)) != 0) goto fail2; epp->ep_current_bist = type; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt uint32_t *value_maskp, __out_ecount_opt(count) unsigned long *valuesp, __in size_t count) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN); EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES); EFSYS_ASSERT3U(epp->ep_current_bist, ==, type); EFSYS_ASSERT(epop->epo_bist_poll != NULL); if (epop->epo_bist_poll == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp, valuesp, count)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN); EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES); 
EFSYS_ASSERT3U(epp->ep_current_bist, ==, type); EFSYS_ASSERT(epop->epo_bist_stop != NULL); if (epop->epo_bist_stop != NULL) epop->epo_bist_stop(enp, type); epp->ep_current_bist = EFX_BIST_TYPE_UNKNOWN; } #endif /* EFSYS_OPT_BIST */ void efx_phy_unprobe( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); epp->ep_epop = NULL; epp->ep_adv_cap_mask = 0; epp->ep_port = 0; epp->ep_phy_type = 0; } Index: stable/10/sys/dev/sfxge/common/efx_port.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_port.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_port.c (revision 293927) @@ -1,259 +1,259 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_impl.h" - __checkReturn int + __checkReturn efx_rc_t efx_port_init( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (enp->en_mod_flags & EFX_MOD_PORT) { rc = EINVAL; goto fail1; } enp->en_mod_flags |= EFX_MOD_PORT; epp->ep_mac_type = EFX_MAC_INVALID; epp->ep_link_mode = EFX_LINK_UNKNOWN; epp->ep_mac_poll_needed = B_TRUE; epp->ep_mac_drain = B_TRUE; /* Configure the MAC */ if ((rc = efx_mac_select(enp)) != 0) goto fail1; epp->ep_emop->emo_reconfigure(enp); /* Pick up current phy capabilities */ efx_port_poll(enp, NULL); /* * Turn on the PHY if available, otherwise reset it, and * reconfigure it with the current configuration. 
*/ if (epop->epo_power != NULL) { if ((rc = epop->epo_power(enp, B_TRUE)) != 0) goto fail2; } else { if ((rc = epop->epo_reset(enp)) != 0) goto fail2; } EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY); enp->en_reset_flags &= ~EFX_RESET_PHY; if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_mod_flags &= ~EFX_MOD_PORT; return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_port_poll( __in efx_nic_t *enp, __out_opt efx_link_mode_t *link_modep) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; efx_link_mode_t ignore_link_mode; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); EFSYS_ASSERT(!epp->ep_mac_stats_pending); if (link_modep == NULL) link_modep = &ignore_link_mode; if ((rc = emop->emo_poll(enp, link_modep)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_LOOPBACK - __checkReturn int + __checkReturn efx_rc_t efx_port_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type) { efx_port_t *epp = &(enp->en_port); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mac_ops_t *emop = epp->ep_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); EFSYS_ASSERT(link_mode < EFX_LINK_NMODES); if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode], loopback_type) == 0) { rc = ENOTSUP; goto fail1; } if (epp->ep_loopback_type == loopback_type && epp->ep_loopback_link_mode == link_mode) return (0); if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_NAMES static const char *__efx_loopback_type_name[] = { "OFF", "DATA", "GMAC", "XGMII", "XGXS", "XAUI", "GMII", "SGMII", "XGBR", "XFI", "XAUI_FAR", "GMII_FAR", "SGMII_FAR", "XFI_FAR", "GPHY", "PHY_XS", "PCS", "PMA_PMD", "XPORT", "XGMII_WS", "XAUI_WS", "XAUI_WS_FAR", "XAUI_WS_NEAR", "GMII_WS", "XFI_WS", "XFI_WS_FAR", "PHYXS_WS", "PMA_INT", "SD_NEAR", "SD_FAR", "PMA_INT_WS", "SD_FEP2_WS", "SD_FEP1_5_WS", "SD_FEP_WS", "SD_FES_WS", }; __checkReturn const char * efx_loopback_type_name( __in efx_nic_t *enp, __in efx_loopback_type_t type) { EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) == EFX_LOOPBACK_NTYPES); _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES); return (__efx_loopback_type_name[type]); } #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_LOOPBACK */ void efx_port_fini( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(epp->ep_mac_drain); epp->ep_emop = NULL; epp->ep_mac_type = EFX_MAC_INVALID; epp->ep_mac_drain = B_FALSE; epp->ep_mac_poll_needed = B_FALSE; /* Turn off the PHY */ if (epop->epo_power != NULL) (void) epop->epo_power(enp, B_FALSE); enp->en_mod_flags &= ~EFX_MOD_PORT; } Index: 
stable/10/sys/dev/sfxge/common/efx_rx.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_rx.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_rx.c (revision 293927) @@ -1,1369 +1,1369 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_init( __in efx_nic_t *enp); static void falconsiena_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_RX_HDR_SPLIT -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_hdr_split_enable( __in efx_nic_t *enp, __in unsigned int hdr_buf_size, __in unsigned int pld_buf_size); #endif /* EFSYS_OPT_RX_HDR_SPLIT */ #if EFSYS_OPT_RX_SCATTER -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n); #endif /* EFSYS_OPT_RX_SCALE */ static void falconsiena_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added); static void falconsiena_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_qflush( __in efx_rxq_t *erp); static void falconsiena_rx_qenable( __in efx_rxq_t *erp); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp); static void falconsiena_rx_qdestroy( __in efx_rxq_t *erp); #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_FALCON static efx_rx_ops_t __efx_rx_falcon_ops = { falconsiena_rx_init, /* erxo_init */ falconsiena_rx_fini, /* erxo_fini */ #if EFSYS_OPT_RX_HDR_SPLIT falconsiena_rx_hdr_split_enable, /* erxo_hdr_split_enable */ #endif #if EFSYS_OPT_RX_SCATTER falconsiena_rx_scatter_enable, /* erxo_scatter_enable */ #endif #if EFSYS_OPT_RX_SCALE falconsiena_rx_scale_mode_set, /* erxo_scale_mode_set */ falconsiena_rx_scale_key_set, /* erxo_scale_key_set */ falconsiena_rx_scale_tbl_set, /* erxo_scale_tbl_set */ #endif falconsiena_rx_qpost, /* erxo_qpost */ falconsiena_rx_qpush, /* erxo_qpush */ falconsiena_rx_qflush, /* erxo_qflush */ falconsiena_rx_qenable, /* erxo_qenable */ falconsiena_rx_qcreate, /* erxo_qcreate */ falconsiena_rx_qdestroy, /* erxo_qdestroy */ }; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA static efx_rx_ops_t __efx_rx_siena_ops = { falconsiena_rx_init, /* erxo_init */ falconsiena_rx_fini, /* erxo_fini */ #if EFSYS_OPT_RX_HDR_SPLIT falconsiena_rx_hdr_split_enable, /* erxo_hdr_split_enable */ #endif #if EFSYS_OPT_RX_SCATTER falconsiena_rx_scatter_enable, /* erxo_scatter_enable */ #endif #if EFSYS_OPT_RX_SCALE falconsiena_rx_scale_mode_set, /* erxo_scale_mode_set */ falconsiena_rx_scale_key_set, /* erxo_scale_key_set */ falconsiena_rx_scale_tbl_set, /* erxo_scale_tbl_set */ #endif falconsiena_rx_qpost, /* erxo_qpost */ 
falconsiena_rx_qpush, /* erxo_qpush */ falconsiena_rx_qflush, /* erxo_qflush */ falconsiena_rx_qenable, /* erxo_qenable */ falconsiena_rx_qcreate, /* erxo_qcreate */ falconsiena_rx_qdestroy, /* erxo_qdestroy */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_rx_ops_t __efx_rx_hunt_ops = { hunt_rx_init, /* erxo_init */ hunt_rx_fini, /* erxo_fini */ #if EFSYS_OPT_RX_HDR_SPLIT hunt_rx_hdr_split_enable, /* erxo_hdr_split_enable */ #endif #if EFSYS_OPT_RX_SCATTER hunt_rx_scatter_enable, /* erxo_scatter_enable */ #endif #if EFSYS_OPT_RX_SCALE hunt_rx_scale_mode_set, /* erxo_scale_mode_set */ hunt_rx_scale_key_set, /* erxo_scale_key_set */ hunt_rx_scale_tbl_set, /* erxo_scale_tbl_set */ #endif hunt_rx_qpost, /* erxo_qpost */ hunt_rx_qpush, /* erxo_qpush */ hunt_rx_qflush, /* erxo_qflush */ hunt_rx_qenable, /* erxo_qenable */ hunt_rx_qcreate, /* erxo_qcreate */ hunt_rx_qdestroy, /* erxo_qdestroy */ }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_rx_init( __inout efx_nic_t *enp) { efx_rx_ops_t *erxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (!(enp->en_mod_flags & EFX_MOD_EV)) { rc = EINVAL; goto fail1; } if (enp->en_mod_flags & EFX_MOD_RX) { rc = EINVAL; goto fail2; } switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: erxop = (efx_rx_ops_t *)&__efx_rx_falcon_ops; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: erxop = (efx_rx_ops_t *)&__efx_rx_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: erxop = (efx_rx_ops_t *)&__efx_rx_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail3; } if ((rc = erxop->erxo_init(enp)) != 0) goto fail4; enp->en_erxop = erxop; enp->en_mod_flags |= EFX_MOD_RX; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_erxop = NULL; enp->en_mod_flags &= ~EFX_MOD_RX; return (rc); } void efx_rx_fini( __in efx_nic_t *enp) { efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0); erxop->erxo_fini(enp); enp->en_erxop = NULL; enp->en_mod_flags &= ~EFX_MOD_RX; } #if EFSYS_OPT_RX_HDR_SPLIT - __checkReturn int + __checkReturn efx_rc_t efx_rx_hdr_split_enable( __in efx_nic_t *enp, __in unsigned int hdr_buf_size, __in unsigned int pld_buf_size) { efx_rx_ops_t *erxop = enp->en_erxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); EFSYS_ASSERT3U(enp->en_family, >=, EFX_FAMILY_SIENA); if ((rc = erxop->erxo_hdr_split_enable(enp, hdr_buf_size, pld_buf_size)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_HDR_SPLIT */ #if EFSYS_OPT_RX_SCATTER - __checkReturn int + __checkReturn efx_rc_t efx_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { efx_rx_ops_t *erxop = enp->en_erxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + 
EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE - __checkReturn int + __checkReturn efx_rc_t efx_rx_hash_support_get( __in efx_nic_t *enp, __out efx_rx_hash_support_t *supportp) { - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (supportp == NULL) { rc = EINVAL; goto fail1; } /* Report if resources are available to insert RX hash value */ *supportp = enp->en_hash_support; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_rx_scale_support_get( __in efx_nic_t *enp, __out efx_rx_scale_support_t *supportp) { - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (supportp == NULL) { rc = EINVAL; goto fail1; } /* Report if resources are available to support RSS */ *supportp = enp->en_rss_support; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { efx_rx_ops_t *erxop = enp->en_erxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (erxop->erxo_scale_mode_set != NULL) { if ((rc = erxop->erxo_scale_mode_set(enp, alg, type, insert)) != 0) goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE - __checkReturn int + __checkReturn efx_rc_t efx_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n) { efx_rx_ops_t *erxop = enp->en_erxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scale_key_set(enp, key, n)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE - __checkReturn int + __checkReturn efx_rc_t efx_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n) { efx_rx_ops_t *erxop = enp->en_erxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scale_tbl_set(enp, table, n)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ void efx_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added) { efx_nic_t *enp = erp->er_enp; efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qpost(erp, addrp, size, n, completed, added); } void efx_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qpush(erp, added, pushedp); } - __checkReturn int + __checkReturn efx_rc_t efx_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_rx_ops_t *erxop = enp->en_erxop; - int rc; + efx_rc_t rc; 
EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); if ((rc = erxop->erxo_qflush(erp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_rx_qenable( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qenable(erp); } - __checkReturn int + __checkReturn efx_rc_t efx_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp) { efx_rx_ops_t *erxop = enp->en_erxop; efx_rxq_t *erp; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); /* Allocate an RXQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp); if (erp == NULL) { rc = ENOMEM; goto fail1; } erp->er_magic = EFX_RXQ_MAGIC; erp->er_enp = enp; erp->er_index = index; erp->er_mask = n - 1; erp->er_esmp = esmp; if ((rc = erxop->erxo_qcreate(enp, index, label, type, esmp, n, id, eep, erp)) != 0) goto fail2; enp->en_rx_qcount++; *erpp = erp; return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_rx_qdestroy( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qdestroy(erp); } /* * Pseudo-header info for Siena/Falcon. * * The pseudo-header is a byte array of one of the forms: * * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 * XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.TT.TT.TT.TT * XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.LL.LL * * where: * * TT.TT.TT.TT is a 32-bit Toeplitz hash * LL.LL is a 16-bit LFSR hash * * Hash values are in network (big-endian) byte order. 
* * * On Huntington the pseudo-header is laid out as: * (See also SF-109306-TC section 9) * * Toeplitz hash (32 bits, little-endian) * Out-of-band outer VLAN tag * (16 bits, big-endian, 0 if the packet did not have an outer VLAN tag) * Out-of-band inner VLAN tag * (16 bits, big-endian, 0 if the packet did not have an inner VLAN tag) * Packet length (16 bits, little-endian, may be 0) * MAC timestamp (32 bits, little-endian, may be 0) * VLAN tag * (16 bits, big-endian, 0 if the packet did not have an outer VLAN tag) * VLAN tag * (16 bits, big-endian, 0 if the packet did not have an inner VLAN tag) */ - __checkReturn int + __checkReturn efx_rc_t efx_psuedo_hdr_pkt_length_get( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *pkt_lengthp) { if (enp->en_family != EFX_FAMILY_HUNTINGTON) { EFSYS_ASSERT(0); return (ENOTSUP); } *pkt_lengthp = buffer[8] | (buffer[9] << 8); return (0); } #if EFSYS_OPT_RX_SCALE uint32_t efx_psuedo_hdr_hash_get( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer) { if (func == EFX_RX_HASHALG_TOEPLITZ) { switch (enp->en_family) { case EFX_FAMILY_FALCON: case EFX_FAMILY_SIENA: return ((buffer[12] << 24) | (buffer[13] << 16) | (buffer[14] << 8) | buffer[15]); case EFX_FAMILY_HUNTINGTON: return (buffer[0] | (buffer[1] << 8) | (buffer[2] << 16) | (buffer[3] << 24)); default: EFSYS_ASSERT(0); return (0); } } else if (func == EFX_RX_HASHALG_LFSR) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_FALCON || enp->en_family == EFX_FAMILY_SIENA); return ((buffer[14] << 8) | buffer[15]); } else { EFSYS_ASSERT(0); return (0); } } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_init( __in efx_nic_t *enp) { efx_oword_t oword; unsigned int index; EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); /* Zero the RSS table */ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) { EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); } #if EFSYS_OPT_RX_SCALE /* The RSS key and indirection table are writable. 
*/ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE; /* Hardware can insert RX hash with/without RSS */ enp->en_hash_support = EFX_RX_HASH_AVAILABLE; #endif /* EFSYS_OPT_RX_SCALE */ return (0); } #if EFSYS_OPT_RX_HDR_SPLIT -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_hdr_split_enable( __in efx_nic_t *enp, __in unsigned int hdr_buf_size, __in unsigned int pld_buf_size) { unsigned int nhdr32; unsigned int npld32; efx_oword_t oword; - int rc; + efx_rc_t rc; nhdr32 = hdr_buf_size / 32; if ((nhdr32 == 0) || (nhdr32 >= (1 << FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH)) || ((hdr_buf_size % 32) != 0)) { rc = EINVAL; goto fail1; } npld32 = pld_buf_size / 32; if ((npld32 == 0) || (npld32 >= (1 << FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH)) || ((pld_buf_size % 32) != 0)) { rc = EINVAL; goto fail2; } if (enp->en_rx_qcount > 0) { rc = EBUSY; goto fail3; } EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_EN, 1); EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE, nhdr32); EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE, npld32); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_HDR_SPLIT */ #if EFSYS_OPT_RX_SCATTER -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { unsigned int nbuf32; efx_oword_t oword; - int rc; + efx_rc_t rc; nbuf32 = buf_size / 32; if ((nbuf32 == 0) || (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) || ((buf_size % 32) != 0)) { rc = EINVAL; goto fail1; } if (enp->en_rx_qcount > 0) { rc = EBUSY; goto fail2; } /* Set scatter buffer size */ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); /* Enable scatter for packets not matching a filter */ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1); EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCATTER */ #define EFX_RX_LFSR_HASH(_enp, _insert) \ do { \ efx_oword_t oword; \ \ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \ (_insert) ? 1 : 0); \ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \ \ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \ &oword); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \ &oword); \ } \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \ do { \ efx_oword_t oword; \ \ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \ (_ip) ? 1 : 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \ (_tcp) ? 0 : 1); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \ (_insert) ? 
1 : 0); \ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \ do { \ efx_oword_t oword; \ \ if ((_enp)->en_family == EFX_FAMILY_FALCON) { \ (_rc) = ((_ip) || (_tcp)) ? ENOTSUP : 0; \ break; \ } \ \ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \ \ (_rc) = 0; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if EFSYS_OPT_RX_SCALE -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { - int rc; + efx_rc_t rc; switch (alg) { case EFX_RX_HASHALG_LFSR: EFX_RX_LFSR_HASH(enp, insert); break; case EFX_RX_HASHALG_TOEPLITZ: EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert, type & (1 << EFX_RX_HASH_IPV4), type & (1 << EFX_RX_HASH_TCPIPV4)); EFX_RX_TOEPLITZ_IPV6_HASH(enp, type & (1 << EFX_RX_HASH_IPV6), type & (1 << EFX_RX_HASH_TCPIPV6), rc); if (rc != 0) goto fail1; break; default: rc = EINVAL; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); EFX_RX_LFSR_HASH(enp, B_FALSE); return (rc); } #endif #if EFSYS_OPT_RX_SCALE -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n) { efx_oword_t oword; unsigned int byte; unsigned int offset; - int rc; + efx_rc_t rc; byte = 0; /* Write Toeplitz IPv4 hash key */ EFX_ZERO_OWORD(oword); for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword); byte = 0; /* Verify Toeplitz IPv4 hash key */ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword); for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail1; } } if ((enp->en_features & EFX_FEATURE_IPV6) == 0) goto done; EFSYS_ASSERT3U(enp->en_family, !=, EFX_FAMILY_FALCON); byte = 0; /* Write Toeplitz IPv6 hash key 3 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); /* Write Toeplitz IPv6 hash key 2 */ EFX_ZERO_OWORD(oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword); /* Write Toeplitz IPv6 hash key 1 */ EFX_ZERO_OWORD(oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword); byte = 0; /* Verify Toeplitz IPv6 hash key 3 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { 
rc = EFAULT; goto fail2; } } /* Verify Toeplitz IPv6 hash key 2 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail3; } } /* Verify Toeplitz IPv6 hash key 1 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail4; } } done: return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif #if EFSYS_OPT_RX_SCALE -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n) { efx_oword_t oword; int index; - int rc; + efx_rc_t rc; EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS); EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH)); if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) { rc = EINVAL; goto fail1; } for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) { uint32_t byte; /* Calculate the entry to place in the table */ byte = (n > 0) ? (uint32_t)table[index % n] : 0; EFSYS_PROBE2(table, int, index, uint32_t, byte); EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte); /* Write the table */ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); } for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) { uint32_t byte; /* Determine if we're starting a new batch */ byte = (n > 0) ? (uint32_t)table[index % n] : 0; /* Read the table */ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); /* Verify the entry */ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) { rc = EFAULT; goto fail2; } } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif static void falconsiena_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added) { efx_qword_t qword; unsigned int i; unsigned int offset; unsigned int id; /* The client driver must not overfill the queue */ EFSYS_ASSERT3U(added - completed + n, <=, EFX_RXQ_LIMIT(erp->er_mask + 1)); id = added & (erp->er_mask); for (i = 0; i < n; i++) { EFSYS_PROBE4(rx_post, unsigned int, erp->er_index, unsigned int, id, efsys_dma_addr_t, addrp[i], size_t, size); EFX_POPULATE_QWORD_3(qword, FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size), FSF_AZ_RX_KER_BUF_ADDR_DW0, (uint32_t)(addrp[i] & 0xffffffff), FSF_AZ_RX_KER_BUF_ADDR_DW1, (uint32_t)(addrp[i] >> 32)); offset = id * sizeof (efx_qword_t); EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword); id = (id + 1) & (erp->er_mask); } } static void falconsiena_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; unsigned int pushed = *pushedp; uint32_t wptr; efx_oword_t oword; efx_dword_t dword; /* All descriptors are pushed */ *pushedp = added; /* Push the populated descriptors out */ wptr = added & erp->er_mask; EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr); /* Only write the third DWORD */ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3)); /* Guarantee ordering of 
memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, wptr, pushed & erp->er_mask); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0, erp->er_index, &dword, B_FALSE); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_oword_t oword; uint32_t label; label = erp->er_index; /* Flush the queue */ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, FRF_AZ_RX_FLUSH_DESCQ, label); EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword); return (0); } static void falconsiena_rx_qenable( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_oword_t oword; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_oword_t oword; uint32_t size; boolean_t split; boolean_t jumbo; - int rc; + efx_rc_t rc; EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH)); EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS); EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS)); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS)); if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_rxq_limit) { rc = EINVAL; goto fail2; } for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS); size++) if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail3; } switch (type) { case EFX_RXQ_TYPE_DEFAULT: split = B_FALSE; jumbo = B_FALSE; break; #if EFSYS_OPT_RX_HDR_SPLIT case EFX_RXQ_TYPE_SPLIT_HEADER: if ((enp->en_family < EFX_FAMILY_SIENA) || ((index & 1) != 0)) { rc = EINVAL; goto fail4; } split = B_TRUE; jumbo = B_TRUE; break; case EFX_RXQ_TYPE_SPLIT_PAYLOAD: if ((enp->en_family < EFX_FAMILY_SIENA) || ((index & 1) == 0)) { rc = EINVAL; goto fail4; } split = B_FALSE; jumbo = B_TRUE; break; #endif /* EFSYS_OPT_RX_HDR_SPLIT */ #if EFSYS_OPT_RX_SCATTER case EFX_RXQ_TYPE_SCATTER: if (enp->en_family < EFX_FAMILY_SIENA) { rc = EINVAL; goto fail4; } split = B_FALSE; jumbo = B_TRUE; break; #endif /* EFSYS_OPT_RX_SCATTER */ default: rc = EINVAL; goto fail4; } /* Set up the new descriptor queue */ EFX_POPULATE_OWORD_10(oword, FRF_CZ_RX_HDR_SPLIT, split, FRF_AZ_RX_ISCSI_DDIG_EN, 0, FRF_AZ_RX_ISCSI_HDIG_EN, 0, FRF_AZ_RX_DESCQ_BUF_BASE_ID, id, FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index, FRF_AZ_RX_DESCQ_OWNER_ID, 0, FRF_AZ_RX_DESCQ_LABEL, label, FRF_AZ_RX_DESCQ_SIZE, size, FRF_AZ_RX_DESCQ_TYPE, 0, FRF_AZ_RX_DESCQ_JUMBO, jumbo); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static void falconsiena_rx_qdestroy( __in efx_rxq_t *erp) { 
efx_nic_t *enp = erp->er_enp; efx_oword_t oword; EFSYS_ASSERT(enp->en_rx_qcount != 0); --enp->en_rx_qcount; /* Purge descriptor queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); /* Free the RXQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); } static void falconsiena_rx_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/efx_sram.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_sram.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_sram.c (revision 293927) @@ -1,332 +1,332 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" - __checkReturn int + __checkReturn efx_rc_t efx_sram_buf_tbl_set( __in efx_nic_t *enp, __in uint32_t id, __in efsys_mem_t *esmp, __in size_t n) { efx_qword_t qword; uint32_t start = id; uint32_t stop = start + n; efsys_dma_addr_t addr; efx_oword_t oword; unsigned int count; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); #if EFSYS_OPT_HUNTINGTON if (enp->en_family == EFX_FAMILY_HUNTINGTON) { /* * FIXME: the efx_sram_buf_tbl_*() functionality needs to be * pulled inside the Falcon/Siena queue create/destroy code, * and then the original functions can be removed (see bug30834 * comment #1). But, for now, we just ensure that they are * no-ops for Huntington, to allow bringing up existing drivers * without modification. 
*/ return (0); } #endif /* EFSYS_OPT_HUNTINGTON */ if (stop >= EFX_BUF_TBL_SIZE) { rc = EFBIG; goto fail1; } /* Add the entries into the buffer table */ addr = EFSYS_MEM_ADDR(esmp); for (id = start; id != stop; id++) { EFX_POPULATE_QWORD_5(qword, FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0, FRF_AZ_BUF_ADR_FBUF_DW0, (uint32_t)((addr >> 12) & 0xffffffff), FRF_AZ_BUF_ADR_FBUF_DW1, (uint32_t)((addr >> 12) >> 32), FRF_AZ_BUF_OWNER_ID_FBUF, 0); EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL, id, &qword); addr += EFX_BUF_SIZE; } EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1); /* Flush the write buffer */ EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1, FRF_AZ_BUF_CLR_CMD, 0); EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword); /* Poll for the last entry being written to the buffer table */ EFSYS_ASSERT3U(id, ==, stop); addr -= EFX_BUF_SIZE; count = 0; do { EFSYS_PROBE1(wait, unsigned int, count); /* Spin for 1 ms */ EFSYS_SPIN(1000); EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL, id - 1, &qword); if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) == (uint32_t)((addr >> 12) & 0xffffffff) && EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) == (uint32_t)((addr >> 12) >> 32)) goto verify; } while (++count < 100); rc = ETIMEDOUT; goto fail2; verify: /* Verify the rest of the entries in the buffer table */ while (--id != start) { addr -= EFX_BUF_SIZE; /* Read the buffer table entry */ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL, id - 1, &qword); if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) != (uint32_t)((addr >> 12) & 0xffffffff) || EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) != (uint32_t)((addr >> 12) >> 32)) { rc = EFAULT; goto fail3; } } return (0); fail3: EFSYS_PROBE(fail3); id = stop; fail2: EFSYS_PROBE(fail2); EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0, FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1, FRF_AZ_BUF_CLR_START_ID, start); EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_sram_buf_tbl_clear( __in efx_nic_t *enp, __in uint32_t id, __in size_t n) { efx_oword_t oword; uint32_t start = id; uint32_t stop = start + n; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); #if EFSYS_OPT_HUNTINGTON if (enp->en_family == EFX_FAMILY_HUNTINGTON) { /* * FIXME: the efx_sram_buf_tbl_*() functionality needs to be * pulled inside the Falcon/Siena queue create/destroy code, * and then the original functions can be removed (see bug30834 * comment #1). But, for now, we just ensure that they are * no-ops for Huntington, to allow bringing up existing drivers * without modification. 
*/ return; } #endif /* EFSYS_OPT_HUNTINGTON */ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE); EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1); EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0, FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1, FRF_AZ_BUF_CLR_START_ID, start); EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword); } #if EFSYS_OPT_DIAG static void efx_sram_byte_increment_set( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp) { size_t offset = row * FR_AZ_SRM_DBG_REG_STEP; unsigned int index; _NOTE(ARGUNUSED(negate)) for (index = 0; index < sizeof (efx_qword_t); index++) eqp->eq_u8[index] = offset + index; } static void efx_sram_all_the_same_set( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp) { _NOTE(ARGUNUSED(row)) if (negate) EFX_SET_QWORD(*eqp); else EFX_ZERO_QWORD(*eqp); } static void efx_sram_bit_alternate_set( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp) { _NOTE(ARGUNUSED(row)) EFX_POPULATE_QWORD_2(*eqp, EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa, EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa); } static void efx_sram_byte_alternate_set( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp) { _NOTE(ARGUNUSED(row)) EFX_POPULATE_QWORD_2(*eqp, EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00, EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00); } static void efx_sram_byte_changing_set( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp) { size_t offset = row * FR_AZ_SRM_DBG_REG_STEP; unsigned int index; for (index = 0; index < sizeof (efx_qword_t); index++) { uint8_t byte; if (offset / 256 == 0) byte = (uint8_t)((offset % 257) % 256); else byte = (uint8_t)(~((offset - 8) % 257) % 256); eqp->eq_u8[index] = (negate) ? ~byte : byte; } } static void efx_sram_bit_sweep_set( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp) { size_t offset = row * FR_AZ_SRM_DBG_REG_STEP; if (negate) { EFX_SET_QWORD(*eqp); EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64); } else { EFX_ZERO_QWORD(*eqp); EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64); } } efx_sram_pattern_fn_t __efx_sram_pattern_fns[] = { efx_sram_byte_increment_set, efx_sram_all_the_same_set, efx_sram_bit_alternate_set, efx_sram_byte_alternate_set, efx_sram_byte_changing_set, efx_sram_bit_sweep_set }; - __checkReturn int + __checkReturn efx_rc_t efx_sram_test( __in efx_nic_t *enp, __in efx_pattern_type_t type) { efx_nic_ops_t *enop = enp->en_enop; efx_sram_pattern_fn_t func; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); /* Select pattern generator */ EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES); func = __efx_sram_pattern_fns[type]; return (enop->eno_sram_test(enp, func)); } #endif /* EFSYS_OPT_DIAG */ Index: stable/10/sys/dev/sfxge/common/efx_tx.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_tx.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_tx.c (revision 293927) @@ -1,1073 +1,1073 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_QSTATS #define EFX_TX_QSTAT_INCR(_etp, _stat) \ do { \ (_etp)->et_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_TX_QSTAT_INCR(_etp, _stat) #endif #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_init( __in efx_nic_t *enp); static void falconsiena_tx_fini( __in efx_nic_t *enp); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp); static void falconsiena_tx_qdestroy( __in efx_txq_t *etp); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); static void falconsiena_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_qflush( __in efx_txq_t *etp); static void falconsiena_tx_qenable( __in efx_txq_t *etp); - __checkReturn int + __checkReturn efx_rc_t falconsiena_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); void falconsiena_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS static void falconsiena_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); #endif #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_FALCON static efx_tx_ops_t __efx_tx_falcon_ops = { falconsiena_tx_init, /* etxo_init */ falconsiena_tx_fini, /* etxo_fini */ falconsiena_tx_qcreate, /* etxo_qcreate */ 
falconsiena_tx_qdestroy, /* etxo_qdestroy */ falconsiena_tx_qpost, /* etxo_qpost */ falconsiena_tx_qpush, /* etxo_qpush */ falconsiena_tx_qpace, /* etxo_qpace */ falconsiena_tx_qflush, /* etxo_qflush */ falconsiena_tx_qenable, /* etxo_qenable */ NULL, /* etxo_qpio_enable */ NULL, /* etxo_qpio_disable */ NULL, /* etxo_qpio_write */ NULL, /* etxo_qpio_post */ falconsiena_tx_qdesc_post, /* etxo_qdesc_post */ falconsiena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ NULL, /* etxo_qdesc_tso_create */ NULL, /* etxo_qdesc_vlantci_create */ #if EFSYS_OPT_QSTATS falconsiena_tx_qstats_update, /* etxo_qstats_update */ #endif }; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA static efx_tx_ops_t __efx_tx_siena_ops = { falconsiena_tx_init, /* etxo_init */ falconsiena_tx_fini, /* etxo_fini */ falconsiena_tx_qcreate, /* etxo_qcreate */ falconsiena_tx_qdestroy, /* etxo_qdestroy */ falconsiena_tx_qpost, /* etxo_qpost */ falconsiena_tx_qpush, /* etxo_qpush */ falconsiena_tx_qpace, /* etxo_qpace */ falconsiena_tx_qflush, /* etxo_qflush */ falconsiena_tx_qenable, /* etxo_qenable */ NULL, /* etxo_qpio_enable */ NULL, /* etxo_qpio_disable */ NULL, /* etxo_qpio_write */ NULL, /* etxo_qpio_post */ falconsiena_tx_qdesc_post, /* etxo_qdesc_post */ falconsiena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ NULL, /* etxo_qdesc_tso_create */ NULL, /* etxo_qdesc_vlantci_create */ #if EFSYS_OPT_QSTATS falconsiena_tx_qstats_update, /* etxo_qstats_update */ #endif }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_tx_ops_t __efx_tx_hunt_ops = { hunt_tx_init, /* etxo_init */ hunt_tx_fini, /* etxo_fini */ hunt_tx_qcreate, /* etxo_qcreate */ hunt_tx_qdestroy, /* etxo_qdestroy */ hunt_tx_qpost, /* etxo_qpost */ hunt_tx_qpush, /* etxo_qpush */ hunt_tx_qpace, /* etxo_qpace */ hunt_tx_qflush, /* etxo_qflush */ hunt_tx_qenable, /* etxo_qenable */ hunt_tx_qpio_enable, /* etxo_qpio_enable */ hunt_tx_qpio_disable, /* etxo_qpio_disable */ hunt_tx_qpio_write, /* etxo_qpio_write */ hunt_tx_qpio_post, /* etxo_qpio_post */ hunt_tx_qdesc_post, /* etxo_qdesc_post */ hunt_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ hunt_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */ hunt_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ #if EFSYS_OPT_QSTATS hunt_tx_qstats_update, /* etxo_qstats_update */ #endif }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_tx_init( __in efx_nic_t *enp) { efx_tx_ops_t *etxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (!(enp->en_mod_flags & EFX_MOD_EV)) { rc = EINVAL; goto fail1; } if (enp->en_mod_flags & EFX_MOD_TX) { rc = EINVAL; goto fail2; } switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: etxop = (efx_tx_ops_t *)&__efx_tx_falcon_ops; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: etxop = (efx_tx_ops_t *)&__efx_tx_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: etxop = (efx_tx_ops_t *)&__efx_tx_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail3; } EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0); if ((rc = etxop->etxo_init(enp)) != 0) goto fail4; enp->en_etxop = etxop; enp->en_mod_flags |= EFX_MOD_TX; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_etxop = NULL; enp->en_mod_flags &= 
~EFX_MOD_TX; return (rc); } void efx_tx_fini( __in efx_nic_t *enp) { efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX); EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0); etxop->etxo_fini(enp); enp->en_etxop = NULL; enp->en_mod_flags &= ~EFX_MOD_TX; } - __checkReturn int + __checkReturn efx_rc_t efx_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __deref_out efx_txq_t **etpp, __out unsigned int *addedp) { efx_tx_ops_t *etxop = enp->en_etxop; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_txq_t *etp; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX); EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit); /* Allocate an TXQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp); if (etp == NULL) { rc = ENOMEM; goto fail1; } etp->et_magic = EFX_TXQ_MAGIC; etp->et_enp = enp; etp->et_index = index; etp->et_mask = n - 1; etp->et_esmp = esmp; /* Initial descriptor index may be modified by etxo_qcreate */ *addedp = 0; if ((rc = etxop->etxo_qcreate(enp, index, label, esmp, n, id, flags, eep, etp, addedp)) != 0) goto fail2; enp->en_tx_qcount++; *etpp = etp; return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qdestroy( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(enp->en_tx_qcount != 0); --enp->en_tx_qcount; etxop->etxo_qdestroy(etp); /* Free the TXQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp); } - __checkReturn int + __checkReturn efx_rc_t efx_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if ((rc = etxop->etxo_qpost(etp, eb, n, completed, addedp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); etxop->etxo_qpush(etp, added, pushed); } - __checkReturn int + __checkReturn efx_rc_t efx_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if ((rc = etxop->etxo_qpace(etp, ns)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_tx_qflush( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if ((rc = etxop->etxo_qflush(etp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qenable( __in efx_txq_t *etp) { efx_nic_t *enp = 
etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); etxop->etxo_qenable(etp); } - __checkReturn int + __checkReturn efx_rc_t efx_tx_qpio_enable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if (~enp->en_features & EFX_FEATURE_PIO_BUFFERS) { rc = ENOTSUP; goto fail1; } if (etxop->etxo_qpio_enable == NULL) { rc = ENOTSUP; goto fail2; } if ((rc = etxop->etxo_qpio_enable(etp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qpio_disable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if (etxop->etxo_qpio_disable != NULL) etxop->etxo_qpio_disable(etp); } - __checkReturn int + __checkReturn efx_rc_t efx_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if (etxop->etxo_qpio_write != NULL) { if ((rc = etxop->etxo_qpio_write(etp, buffer, buf_length, pio_buf_offset)) != 0) goto fail1; return (0); } return (ENOTSUP); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if (etxop->etxo_qpio_post != NULL) { if ((rc = etxop->etxo_qpio_post(etp, pkt_length, completed, addedp)) != 0) goto fail1; return (0); } return (ENOTSUP); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if ((rc = etxop->etxo_qdesc_post(etp, ed, n, completed, addedp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL); etxop->etxo_qdesc_dma_create(etp, addr, size, eop, edp); } void efx_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL); etxop->etxo_qdesc_tso_create(etp, ipv4_id, tcp_seq, tcp_flags, edp); } void efx_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t tci, __out efx_desc_t *edp) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; 
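	/*
	 * Editor's note (illustrative, not part of this change): every
	 * fallible wrapper in this file follows the same pattern that this
	 * commit retypes from int to efx_rc_t -- declare rc as efx_rc_t,
	 * jump to a numbered failN label on error, fire
	 * EFSYS_PROBE1(fail1, efx_rc_t, rc) and return rc.  Callers treat
	 * the result as a positive errno value; a minimal sketch (dev and
	 * device_printf() are hypothetical caller context):
	 *
	 *	efx_rc_t rc;
	 *
	 *	if ((rc = efx_tx_qpace(etp, ns)) != 0)
	 *		device_printf(dev, "qpace failed: %d\n", rc);
	 */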
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL); etxop->etxo_qdesc_vlantci_create(etp, tci, edp); } #if EFSYS_OPT_QSTATS void efx_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) { efx_nic_t *enp = etp->et_enp; efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); etxop->etxo_qstats_update(etp, stat); } #endif #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_init( __in efx_nic_t *enp) { efx_oword_t oword; /* * Disable the timer-based TX DMA backoff and allow TX DMA to be * controlled by the RX FIFO fill level (although always allow a * minimal trickle). */ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); /* * Filter all packets less than 14 bytes to avoid parsing * errors. */ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword); /* * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16 * descriptors (which is bad). */ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0); EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword); return (0); } #define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \ do { \ unsigned int id; \ size_t offset; \ efx_qword_t qword; \ \ id = (_added)++ & (_etp)->et_mask; \ offset = id * sizeof (efx_qword_t); \ \ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \ unsigned int, id, efsys_dma_addr_t, (_addr), \ size_t, (_size), boolean_t, (_eop)); \ \ EFX_POPULATE_QWORD_4(qword, \ FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \ FSF_AZ_TX_KER_BUF_ADDR_DW0, \ (uint32_t)((_addr) & 0xffffffff), \ FSF_AZ_TX_KER_BUF_ADDR_DW1, \ (uint32_t)((_addr) >> 32)); \ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; int rc = ENOSPC; if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) goto fail1; for (i = 0; i < n; i++) { efx_buffer_t *ebp = &eb[i]; efsys_dma_addr_t start = ebp->eb_addr; size_t size = ebp->eb_size; efsys_dma_addr_t end = start + size; /* Fragments must not span 4k boundaries. 
*/ EFSYS_ASSERT(P2ROUNDUP(start + 1, 4096) >= end); EFX_TX_DESC(etp, start, size, ebp->eb_eop, added); } EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static void falconsiena_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed) { efx_nic_t *enp = etp->et_enp; uint32_t wptr; efx_dword_t dword; efx_oword_t oword; /* Push the populated descriptors out */ wptr = added & etp->et_mask; EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr); /* Only write the third DWORD */ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3)); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, pushed & etp->et_mask); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0, etp->et_index, &dword, B_FALSE); } #define EFX_MAX_PACE_VALUE 20 #define EFX_TX_PACE_CLOCK_BASE 104 -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns) { efx_nic_t *enp = etp->et_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_oword_t oword; unsigned int pace_val; unsigned int timer_period; - int rc; + efx_rc_t rc; if (ns == 0) { pace_val = 0; } else { /* * The pace_val to write into the table is s.t * ns <= timer_period * (2 ^ pace_val) */ timer_period = EFX_TX_PACE_CLOCK_BASE / encp->enc_clk_mult; for (pace_val = 1; pace_val <= EFX_MAX_PACE_VALUE; pace_val++) { if ((timer_period << pace_val) >= ns) break; } } if (pace_val > EFX_MAX_PACE_VALUE) { rc = EINVAL; goto fail1; } /* Update the pacing table */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_PACE, pace_val); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_PACE_TBL, etp->et_index, &oword, B_TRUE); return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_qflush( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; uint32_t label; efx_tx_qpace(etp, 0); label = etp->et_index; /* Flush the queue */ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, FRF_AZ_TX_FLUSH_DESCQ, label); EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword); return (0); } static void falconsiena_tx_qenable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword, B_TRUE); EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index, uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0)); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword, B_TRUE); } -static __checkReturn int +static __checkReturn efx_rc_t falconsiena_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_oword_t oword; uint32_t size; - int rc; + efx_rc_t rc; EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS == (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH)); EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS); 
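	/*
	 * Editor's note (illustrative, not part of this change): the
	 * FRF_AZ_TX_DESCQ_SIZE field written further down encodes the ring
	 * size as log2(n / EFX_TXQ_MINNDESCS), which is what the search
	 * loop below computes.  For example, assuming
	 * EFX_TXQ_MINNDESCS == 512 (treat the constant's value as an
	 * assumption here), a 4096-entry ring yields size == 3.
	 */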
EFSYS_ASSERT(ISP2(EFX_TXQ_MAXNDESCS(encp))); EFX_STATIC_ASSERT(ISP2(EFX_TXQ_MINNDESCS)); if (!ISP2(n) || (n < EFX_TXQ_MINNDESCS) || (n > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_txq_limit) { rc = EINVAL; goto fail2; } for (size = 0; (1 << size) <= (EFX_TXQ_MAXNDESCS(encp) / EFX_TXQ_MINNDESCS); size++) if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail3; } /* Set up the new descriptor queue */ *addedp = 0; EFX_POPULATE_OWORD_6(oword, FRF_AZ_TX_DESCQ_BUF_BASE_ID, id, FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index, FRF_AZ_TX_DESCQ_OWNER_ID, 0, FRF_AZ_TX_DESCQ_LABEL, label, FRF_AZ_TX_DESCQ_SIZE, size, FRF_AZ_TX_DESCQ_TYPE, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS, (flags & EFX_CKSUM_IPV4) ? 0 : 1); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS, (flags & EFX_CKSUM_TCPUDP) ? 0 : 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword, B_TRUE); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t falconsiena_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; - int rc; + efx_rc_t rc; if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } for (i = 0; i < n; i++) { efx_desc_t *edp = &ed[i]; unsigned int id; size_t offset; id = added++ & etp->et_mask; offset = id * sizeof (efx_desc_t); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq); } EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index, unsigned int, added, unsigned int, n); EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void falconsiena_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp) { /* Fragments must not span 4k boundaries. */ EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size); EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, efsys_dma_addr_t, addr, size_t, size, boolean_t, eop); EFX_POPULATE_QWORD_4(edp->ed_eq, FSF_AZ_TX_KER_CONT, eop ? 
0 : 1, FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)size, FSF_AZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), FSF_AZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); } #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 9d8d26a0a5e2c453 */ static const char *__efx_tx_qstat_name[] = { "post", "post_pio", }; /* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */ const char * efx_tx_qstat_name( __in efx_nic_t *enp, __in unsigned int id) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, TX_NQSTATS); return (__efx_tx_qstat_name[id]); } #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_FALCON || EFSYS_OPT_SIENA #if EFSYS_OPT_QSTATS static void falconsiena_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < TX_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, etp->et_stat[id]); etp->et_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ static void falconsiena_tx_qdestroy( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; /* Purge descriptor queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword, B_TRUE); } static void falconsiena_tx_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_FALCON || EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/efx_vpd.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_vpd.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_vpd.c (revision 293927) @@ -1,1032 +1,1032 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_VPD #define TAG_TYPE_LBN 7 #define TAG_TYPE_WIDTH 1 #define TAG_TYPE_LARGE_ITEM_DECODE 1 #define TAG_TYPE_SMALL_ITEM_DECODE 0 #define TAG_SMALL_ITEM_NAME_LBN 3 #define TAG_SMALL_ITEM_NAME_WIDTH 4 #define TAG_SMALL_ITEM_SIZE_LBN 0 #define TAG_SMALL_ITEM_SIZE_WIDTH 3 #define TAG_LARGE_ITEM_NAME_LBN 0 #define TAG_LARGE_ITEM_NAME_WIDTH 7 #define TAG_NAME_END_DECODE 0x0f #define TAG_NAME_ID_STRING_DECODE 0x02 #define TAG_NAME_VPD_R_DECODE 0x10 #define TAG_NAME_VPD_W_DECODE 0x11 #if EFSYS_OPT_FALCON static efx_vpd_ops_t __efx_vpd_falcon_ops = { NULL, /* evpdo_init */ falcon_vpd_size, /* evpdo_size */ falcon_vpd_read, /* evpdo_read */ falcon_vpd_verify, /* evpdo_verify */ NULL, /* evpdo_reinit */ falcon_vpd_get, /* evpdo_get */ falcon_vpd_set, /* evpdo_set */ falcon_vpd_next, /* evpdo_next */ falcon_vpd_write, /* evpdo_write */ NULL, /* evpdo_fini */ }; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA static efx_vpd_ops_t __efx_vpd_siena_ops = { siena_vpd_init, /* evpdo_init */ siena_vpd_size, /* evpdo_size */ siena_vpd_read, /* evpdo_read */ siena_vpd_verify, /* evpdo_verify */ siena_vpd_reinit, /* evpdo_reinit */ siena_vpd_get, /* evpdo_get */ siena_vpd_set, /* evpdo_set */ siena_vpd_next, /* evpdo_next */ siena_vpd_write, /* evpdo_write */ siena_vpd_fini, /* evpdo_fini */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static efx_vpd_ops_t __efx_vpd_hunt_ops = { hunt_vpd_init, /* evpdo_init */ hunt_vpd_size, /* evpdo_size */ hunt_vpd_read, /* evpdo_read */ hunt_vpd_verify, /* evpdo_verify */ hunt_vpd_reinit, /* evpdo_reinit */ hunt_vpd_get, /* evpdo_get */ hunt_vpd_set, /* evpdo_set */ hunt_vpd_next, /* evpdo_next */ hunt_vpd_write, /* evpdo_write */ hunt_vpd_fini, /* evpdo_fini */ }; #endif /* EFSYS_OPT_HUNTINGTON */ - __checkReturn int + __checkReturn efx_rc_t efx_vpd_init( __in efx_nic_t *enp) { efx_vpd_ops_t *evpdop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD)); switch (enp->en_family) { #if EFSYS_OPT_FALCON case EFX_FAMILY_FALCON: evpdop = (efx_vpd_ops_t *)&__efx_vpd_falcon_ops; break; #endif /* EFSYS_OPT_FALCON */ #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: evpdop = (efx_vpd_ops_t *)&__efx_vpd_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: evpdop = (efx_vpd_ops_t *)&__efx_vpd_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } if (evpdop->evpdo_init != NULL) { if ((rc = evpdop->evpdo_init(enp)) != 0) goto fail2; } enp->en_evpdop = evpdop; enp->en_mod_flags |= EFX_MOD_VPD; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_size( __in efx_nic_t *enp, __out size_t *sizep) { efx_vpd_ops_t *evpdop = enp->en_evpdop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_size(enp, sizep)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size) { efx_vpd_ops_t *evpdop = enp->en_evpdop; - 
int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_read(enp, data, size)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { efx_vpd_ops_t *evpdop = enp->en_evpdop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { efx_vpd_ops_t *evpdop = enp->en_evpdop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if (evpdop->evpdo_reinit == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp) { efx_vpd_ops_t *evpdop = enp->en_evpdop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_set( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp) { efx_vpd_ops_t *evpdop = enp->en_evpdop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_next( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp) { efx_vpd_ops_t *evpdop = enp->en_evpdop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { efx_vpd_ops_t *evpdop = enp->en_evpdop; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_write(enp, data, size)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_vpd_next_tag( __in caddr_t data, __in size_t size, __inout unsigned int *offsetp, __out efx_vpd_tag_t *tagp, __out uint16_t *lengthp) { efx_byte_t byte; 
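	/*
	 * Editor's note (illustrative, not part of this change): the tag
	 * header parsed below follows the PCI VPD resource format encoded
	 * by the TAG_* constants above:
	 *
	 *	small item: 1-byte header, bit 7 = 0, bits 6:3 = name,
	 *	            bits 2:0 = payload length
	 *	large item: 3-byte header, bit 7 = 1, bits 6:0 = name,
	 *	            bytes 1-2 = little-endian payload length
	 *
	 * so headlen is 1 or 3, and *offsetp is only advanced past the
	 * header once the whole payload is known to fit within size.
	 */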
efx_word_t word; uint8_t name; uint16_t length; size_t headlen; - int rc; + efx_rc_t rc; if (*offsetp >= size) { rc = EFAULT; goto fail1; } EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]); switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) { case TAG_TYPE_SMALL_ITEM_DECODE: headlen = 1; name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME); length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE); break; case TAG_TYPE_LARGE_ITEM_DECODE: headlen = 3; if (*offsetp + headlen > size) { rc = EFAULT; goto fail2; } name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME); EFX_POPULATE_WORD_2(word, EFX_BYTE_0, data[*offsetp + 1], EFX_BYTE_1, data[*offsetp + 2]); length = EFX_WORD_FIELD(word, EFX_WORD_0); break; default: rc = EFAULT; goto fail2; } if (*offsetp + headlen + length > size) { rc = EFAULT; goto fail3; } EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END); EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID); EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO); EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW); if (name != EFX_VPD_END && name != EFX_VPD_ID && name != EFX_VPD_RO) { rc = EFAULT; goto fail4; } *tagp = name; *lengthp = length; *offsetp += headlen; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_vpd_next_keyword( __in_bcount(size) caddr_t tag, __in size_t size, __in unsigned int pos, __out efx_vpd_keyword_t *keywordp, __out uint8_t *lengthp) { efx_vpd_keyword_t keyword; uint8_t length; - int rc; + efx_rc_t rc; if (pos + 3U > size) { rc = EFAULT; goto fail1; } keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]); length = tag[pos + 2]; if (length == 0 || pos + 3U + length > size) { rc = EFAULT; goto fail2; } *keywordp = keyword; *lengthp = length; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_hunk_length( __in_bcount(size) caddr_t data, __in size_t size, __out size_t *lengthp) { efx_vpd_tag_t tag; unsigned int offset; uint16_t taglen; - int rc; + efx_rc_t rc; offset = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail1; offset += taglen; if (tag == EFX_VPD_END) break; } *lengthp = offset; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_hunk_verify( __in_bcount(size) caddr_t data, __in size_t size, __out_opt boolean_t *cksummedp) { efx_vpd_tag_t tag; efx_vpd_keyword_t keyword; unsigned int offset; unsigned int pos; unsigned int i; uint16_t taglen; uint8_t keylen; uint8_t cksum; boolean_t cksummed = B_FALSE; - int rc; + efx_rc_t rc; /* * Parse every tag,keyword in the existing VPD. If the csum is present, * the assert it is correct, and is the final keyword in the RO block. 
*/ offset = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail1; if (tag == EFX_VPD_END) break; else if (tag == EFX_VPD_ID) goto done; for (pos = 0; pos != taglen; pos += 3 + keylen) { /* RV keyword must be the last in the block */ if (cksummed) { rc = EFAULT; goto fail2; } if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &keyword, &keylen)) != 0) goto fail3; if (keyword == EFX_VPD_KEYWORD('R', 'V')) { cksum = 0; for (i = 0; i < offset + pos + 4; i++) cksum += data[i]; if (cksum != 0) { rc = EFAULT; goto fail4; } cksummed = B_TRUE; } } done: offset += taglen; } if (!cksummed) { rc = EFAULT; goto fail5; } if (cksummedp != NULL) *cksummedp = cksummed; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint8_t __efx_vpd_blank_pid[] = { /* Large resource type ID length 1 */ 0x82, 0x01, 0x00, /* Product name ' ' */ 0x32, }; static uint8_t __efx_vpd_blank_r[] = { /* Large resource type VPD-R length 4 */ 0x90, 0x04, 0x00, /* RV keyword length 1 */ 'R', 'V', 0x01, /* RV payload checksum */ 0x00, }; - __checkReturn int + __checkReturn efx_rc_t efx_vpd_hunk_reinit( __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t wantpid) { unsigned int offset = 0; unsigned int pos; efx_byte_t byte; uint8_t cksum; - int rc; + efx_rc_t rc; if (size < 0x100) { rc = ENOSPC; goto fail1; } if (wantpid) { memcpy(data + offset, __efx_vpd_blank_pid, sizeof (__efx_vpd_blank_pid)); offset += sizeof (__efx_vpd_blank_pid); } memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r)); offset += sizeof (__efx_vpd_blank_r); /* Update checksum */ cksum = 0; for (pos = 0; pos < offset; pos++) cksum += data[pos]; data[offset - 1] -= cksum; /* Append trailing tag */ EFX_POPULATE_BYTE_3(byte, TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE, TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE, TAG_SMALL_ITEM_SIZE, 0); data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0); offset++; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_hunk_next( __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_tag_t *tagp, __out efx_vpd_keyword_t *keywordp, __out_bcount_opt(*paylenp) unsigned int *payloadp, __out_opt uint8_t *paylenp, __inout unsigned int *contp) { efx_vpd_tag_t tag; efx_vpd_keyword_t keyword = 0; unsigned int offset; unsigned int pos; unsigned int index; uint16_t taglen; uint8_t keylen; uint8_t paylen; - int rc; + efx_rc_t rc; offset = index = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail1; if (tag == EFX_VPD_END) break; if (tag == EFX_VPD_ID) { if (index == *contp) { EFSYS_ASSERT3U(taglen, <, 0x100); paylen = (uint8_t)MIN(taglen, 0xff); goto done; } } else { for (pos = 0; pos != taglen; pos += 3 + keylen) { if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &keyword, &keylen)) != 0) goto fail2; if (index == *contp) { offset += pos + 3; paylen = keylen; goto done; } } } offset += taglen; } *contp = 0; return (0); done: *tagp = tag; *keywordp = keyword; if (payloadp != NULL) *payloadp = offset; if (paylenp != NULL) *paylenp = paylen; ++(*contp); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + 
__checkReturn efx_rc_t efx_vpd_hunk_get( __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_tag_t tag, __in efx_vpd_keyword_t keyword, __out unsigned int *payloadp, __out uint8_t *paylenp) { efx_vpd_tag_t itag; efx_vpd_keyword_t ikeyword; unsigned int offset; unsigned int pos; uint16_t taglen; uint8_t keylen; - int rc; + efx_rc_t rc; offset = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &itag, &taglen)) != 0) goto fail1; if (itag == EFX_VPD_END) break; if (itag == tag) { if (itag == EFX_VPD_ID) { EFSYS_ASSERT3U(taglen, <, 0x100); *paylenp = (uint8_t)MIN(taglen, 0xff); *payloadp = offset; return (0); } for (pos = 0; pos != taglen; pos += 3 + keylen) { if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &ikeyword, &keylen)) != 0) goto fail2; if (ikeyword == keyword) { *paylenp = keylen; *payloadp = offset + pos + 3; return (0); } } } offset += taglen; } /* Not an error */ return (ENOENT); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_vpd_hunk_set( __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp) { efx_word_t word; efx_vpd_tag_t tag; efx_vpd_keyword_t keyword; unsigned int offset; unsigned int pos; unsigned int taghead; unsigned int source; unsigned int dest; unsigned int i; uint16_t taglen; uint8_t keylen; uint8_t cksum; size_t used; - int rc; + efx_rc_t rc; switch (evvp->evv_tag) { case EFX_VPD_ID: if (evvp->evv_keyword != 0) { rc = EINVAL; goto fail1; } /* Can't delete the ID keyword */ if (evvp->evv_length == 0) { rc = EINVAL; goto fail1; } break; case EFX_VPD_RO: if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) { rc = EINVAL; goto fail1; } break; default: rc = EINVAL; goto fail1; } /* Determine total size of all current tags */ if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0) goto fail2; offset = 0; _NOTE(CONSTANTCONDITION) while (1) { taghead = offset; if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail3; if (tag == EFX_VPD_END) break; else if (tag != evvp->evv_tag) { offset += taglen; continue; } /* We only support modifying large resource tags */ if (offset - taghead != 3) { rc = EINVAL; goto fail4; } /* * Work out the offset of the byte immediately after the * old (=source) and new (=dest) new keyword/tag */ pos = 0; if (tag == EFX_VPD_ID) { source = offset + taglen; dest = offset + evvp->evv_length; goto check_space; } EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO); source = dest = 0; for (pos = 0; pos != taglen; pos += 3 + keylen) { if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &keyword, &keylen)) != 0) goto fail5; if (keyword == evvp->evv_keyword && evvp->evv_length == 0) { /* Deleting this keyword */ source = offset + pos + 3 + keylen; dest = offset + pos; break; } else if (keyword == evvp->evv_keyword) { /* Adjusting this keyword */ source = offset + pos + 3 + keylen; dest = offset + pos + 3 + evvp->evv_length; break; } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) { /* The RV keyword must be at the end */ EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen); /* * The keyword doesn't already exist. If the * user deleting a non-existant keyword then * this is a no-op. 
*/ if (evvp->evv_length == 0) return (0); /* Insert this keyword before the RV keyword */ source = offset + pos; dest = offset + pos + 3 + evvp->evv_length; break; } } check_space: if (used + dest > size + source) { rc = ENOSPC; goto fail6; } /* Move trailing data */ (void) memmove(data + dest, data + source, used - source); /* Copy contents */ memcpy(data + dest - evvp->evv_length, evvp->evv_value, evvp->evv_length); /* Insert new keyword header if required */ if (tag != EFX_VPD_ID && evvp->evv_length > 0) { EFX_POPULATE_WORD_1(word, EFX_WORD_0, evvp->evv_keyword); data[offset + pos + 0] = EFX_WORD_FIELD(word, EFX_BYTE_0); data[offset + pos + 1] = EFX_WORD_FIELD(word, EFX_BYTE_1); data[offset + pos + 2] = evvp->evv_length; } /* Modify tag length (large resource type) */ taglen += (dest - source); EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen); data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0); data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1); goto checksum; } /* Unable to find the matching tag */ rc = ENOENT; goto fail7; checksum: /* Find the RV tag, and update the checksum */ offset = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail8; if (tag == EFX_VPD_END) break; if (tag == EFX_VPD_RO) { for (pos = 0; pos != taglen; pos += 3 + keylen) { if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &keyword, &keylen)) != 0) goto fail9; if (keyword == EFX_VPD_KEYWORD('R', 'V')) { cksum = 0; for (i = 0; i < offset + pos + 3; i++) cksum += data[i]; data[i] = -cksum; break; } } } offset += taglen; } /* Zero out the unused portion */ (void) memset(data + offset + taglen, 0xff, size - offset - taglen); return (0); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_vpd_fini( __in efx_nic_t *enp) { efx_vpd_ops_t *evpdop = enp->en_evpdop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if (evpdop->evpdo_fini != NULL) evpdop->evpdo_fini(enp); enp->en_evpdop = NULL; enp->en_mod_flags &= ~EFX_MOD_VPD; } #endif /* EFSYS_OPT_VPD */ Index: stable/10/sys/dev/sfxge/common/efx_wol.c =================================================================== --- stable/10/sys/dev/sfxge/common/efx_wol.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/efx_wol.c (revision 293927) @@ -1,417 +1,417 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_impl.h" #if EFSYS_OPT_WOL - __checkReturn int + __checkReturn efx_rc_t efx_wol_init( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_WOL)); if (~(encp->enc_features) & EFX_FEATURE_WOL) { rc = ENOTSUP; goto fail1; } /* Current implementation is Siena specific */ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); enp->en_mod_flags |= EFX_MOD_WOL; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_wol_filter_clear( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_WOL_FILTER_RESET_IN_LEN, MC_CMD_WOL_FILTER_RESET_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_WOL_FILTER_RESET; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_WOL_FILTER_RESET_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_WOL_FILTER_RESET_OUT_LEN; MCDI_IN_SET_DWORD(req, WOL_FILTER_RESET_IN_MASK, MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS | MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_wol_filter_add( __in efx_nic_t *enp, __in efx_wol_type_t type, __in efx_wol_param_t *paramp, __out uint32_t *filter_idp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_WOL_FILTER_SET_IN_LEN, MC_CMD_WOL_FILTER_SET_OUT_LEN)]; efx_byte_t link_mask; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_WOL_FILTER_SET; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_WOL_FILTER_SET_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_WOL_FILTER_SET_OUT_LEN; switch (type) { case EFX_WOL_TYPE_MAGIC: MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE, MC_CMD_FILTER_MODE_SIMPLE); MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE, MC_CMD_WOL_TYPE_MAGIC); EFX_MAC_ADDR_COPY( MCDI_IN2(req, uint8_t, WOL_FILTER_SET_IN_MAGIC_MAC), paramp->ewp_magic.mac_addr); break; case EFX_WOL_TYPE_BITMAP: { uint32_t swapped = 0; efx_dword_t *dwordp; unsigned int pos, bit; MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE, MC_CMD_FILTER_MODE_SIMPLE); MCDI_IN_SET_DWORD(req, 
WOL_FILTER_SET_IN_WOL_TYPE, MC_CMD_WOL_TYPE_BITMAP); /* * MC bitmask is supposed to be bit swapped * amongst 32 bit words(!) */ dwordp = MCDI_IN2(req, efx_dword_t, WOL_FILTER_SET_IN_BITMAP_MASK); EFSYS_ASSERT3U(EFX_WOL_BITMAP_MASK_SIZE % 4, ==, 0); for (pos = 0; pos < EFX_WOL_BITMAP_MASK_SIZE; ++pos) { uint8_t native = paramp->ewp_bitmap.mask[pos]; for (bit = 0; bit < 8; ++bit) { swapped <<= 1; swapped |= (native & 0x1); native >>= 1; } if ((pos & 3) == 3) { EFX_POPULATE_DWORD_1(dwordp[pos >> 2], EFX_DWORD_0, swapped); swapped = 0; } } memcpy(MCDI_IN2(req, uint8_t, WOL_FILTER_SET_IN_BITMAP_BITMAP), paramp->ewp_bitmap.value, sizeof (paramp->ewp_bitmap.value)); EFSYS_ASSERT3U(paramp->ewp_bitmap.value_len, <=, sizeof (paramp->ewp_bitmap.value)); MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_BITMAP_LEN, paramp->ewp_bitmap.value_len); } break; case EFX_WOL_TYPE_LINK: MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE, MC_CMD_FILTER_MODE_SIMPLE); MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE, MC_CMD_WOL_TYPE_LINK); EFX_ZERO_BYTE(link_mask); EFX_SET_BYTE_FIELD(link_mask, MC_CMD_WOL_FILTER_SET_IN_LINK_UP, 1); MCDI_IN_SET_BYTE(req, WOL_FILTER_SET_IN_LINK_MASK, link_mask.eb_u8[0]); break; default: EFSYS_ASSERT3U(type, !=, type); } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_WOL_FILTER_SET_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *filter_idp = MCDI_OUT_DWORD(req, WOL_FILTER_SET_OUT_FILTER_ID); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_wol_filter_remove( __in efx_nic_t *enp, __in uint32_t filter_id) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_WOL_FILTER_REMOVE_IN_LEN, MC_CMD_WOL_FILTER_REMOVE_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_WOL_FILTER_REMOVE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_WOL_FILTER_REMOVE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_WOL_FILTER_REMOVE_OUT_LEN; MCDI_IN_SET_DWORD(req, WOL_FILTER_REMOVE_IN_FILTER_ID, filter_id); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_lightsout_offload_add( __in efx_nic_t *enp, __in efx_lightsout_offload_type_t type, __in efx_lightsout_offload_param_t *paramp, __out uint32_t *filter_idp) { efx_mcdi_req_t req; uint8_t payload[MAX(MAX(MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN, MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN), MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_ADD_LIGHTSOUT_OFFLOAD; req.emr_in_buf = payload; req.emr_in_length = sizeof (type); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN; switch (type) { case EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP: req.emr_in_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN; MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL, MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP); EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC), paramp->elop_arp.mac_addr); MCDI_IN_SET_DWORD(req, 
ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP, paramp->elop_arp.ip); break; case EFX_LIGHTSOUT_OFFLOAD_TYPE_NS: req.emr_in_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN; MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL, MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS); EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC), paramp->elop_ns.mac_addr); memcpy(MCDI_IN2(req, uint8_t, ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6), paramp->elop_ns.solicited_node, sizeof (paramp->elop_ns.solicited_node)); memcpy(MCDI_IN2(req, uint8_t, ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6), paramp->elop_ns.ip, sizeof (paramp->elop_ns.ip)); break; default: rc = EINVAL; goto fail1; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN) { rc = EMSGSIZE; goto fail3; } *filter_idp = MCDI_OUT_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t efx_lightsout_offload_remove( __in efx_nic_t *enp, __in efx_lightsout_offload_type_t type, __in uint32_t filter_id) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN, MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN; switch (type) { case EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP: MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL, MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP); break; case EFX_LIGHTSOUT_OFFLOAD_TYPE_NS: MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL, MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS); break; default: rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID, filter_id); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_wol_fini( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL); enp->en_mod_flags &= ~EFX_MOD_WOL; } #endif /* EFSYS_OPT_WOL */ Index: stable/10/sys/dev/sfxge/common/hunt_ev.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_ev.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_ev.c (revision 293927) @@ -1,1010 +1,1010 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_MON_STATS #include "mcdi_mon.h" #endif #if EFSYS_OPT_HUNTINGTON #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ do { \ (_eep)->ee_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_EV_QSTAT_INCR(_eep, _stat) #endif static __checkReturn boolean_t hunt_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t hunt_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t hunt_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t hunt_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t hunt_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_init_evq( __in efx_nic_t *enp, __in unsigned int instance, __in efsys_mem_t *esmp, __in size_t nevs, __in uint32_t irq, __out_opt uint32_t *irqp) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), MC_CMD_INIT_EVQ_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; int supports_rx_batching; - int rc; + efx_rc_t rc; npages = EFX_EVQ_NBUFS(nevs); if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq); /* * On Huntington RX and TX event batching can only be requested * together (even if the datapath firmware doesn't actually support RX * batching). * Cut through is incompatible with RX batching and so enabling cut * through disables RX batching (but it does not affect TX batching). * * So always enable RX and TX event batching, and enable cut through * if RX event batching isn't supported (i.e. on low latency firmware). 
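The batching/cut-through rule spelled out in the comment above reduces to a single decision on one firmware capability. The sketch below is illustrative only (the driver builds the flags directly with MCDI_IN_POPULATE_DWORD_6()); the struct and helper names are hypothetical.

struct evq_flags_sketch {
	int	interrupting;
	int	cut_thru;
	int	rx_merge;
	int	tx_merge;
};

static struct evq_flags_sketch
pick_evq_flags(int rx_batching_supported)
{
	struct evq_flags_sketch f;

	f.interrupting = 1;
	/* Always request RX and TX event merging... */
	f.rx_merge = 1;
	f.tx_merge = 1;
	/* ...and enable cut-through only when RX merging is unavailable. */
	f.cut_thru = !rx_batching_supported;
	return (f);
}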
*/ supports_rx_batching = enp->en_nic_cfg.enc_rx_batching_enabled ? 1 : 0; MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS, INIT_EVQ_IN_FLAG_INTERRUPTING, 1, INIT_EVQ_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_IN_FLAG_INT_ARMD, 0, INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_batching, INIT_EVQ_IN_FLAG_RX_MERGE, 1, INIT_EVQ_IN_FLAG_TX_MERGE, 1); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE, MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { rc = EMSGSIZE; goto fail3; } if (irqp != NULL) *irqp = MCDI_OUT_DWORD(req, INIT_EVQ_OUT_IRQ); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_fini_evq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN, MC_CMD_FINI_EVQ_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_ev_init( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) return (0); } void hunt_ev_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } - __checkReturn int + __checkReturn efx_rc_t hunt_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t irq; - int rc; + efx_rc_t rc; _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail2; } /* Set up the handler table */ eep->ee_rx = hunt_ev_rx; eep->ee_tx = hunt_ev_tx; eep->ee_driver = hunt_ev_driver; eep->ee_drv_gen = hunt_ev_drv_gen; eep->ee_mcdi = hunt_ev_mcdi; /* * Set up the event queue * NOTE: ignore the returned IRQ param as firmware does not set it. 
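For reference, the DMA address loop in efx_mcdi_init_evq() above writes one qword per buffer-table page, low 32 bits in EFX_DWORD_0 and high 32 bits in EFX_DWORD_1. A standalone sketch of that packing, with a uint32_t pair standing in for efx_qword_t and an assumed page granularity in place of EFX_BUF_SIZE:

#include <stdint.h>

#define	SKETCH_BUF_SIZE	4096	/* assumption: stands in for EFX_BUF_SIZE */

static void
fill_dma_table(uint32_t (*tbl)[2], int npages, uint64_t base)
{
	int i;

	for (i = 0; i < npages; i++) {
		tbl[i][0] = (uint32_t)(base & 0xffffffff);	/* low dword */
		tbl[i][1] = (uint32_t)(base >> 32);		/* high dword */
		base += SKETCH_BUF_SIZE;			/* next page */
	}
}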
*/ irq = index; /* INIT_EVQ expects function-relative vector number */ if ((rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, NULL)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index); } - __checkReturn int + __checkReturn efx_rc_t hunt_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; rptr = count & eep->ee_mask; if (enp->en_nic_cfg.enc_bug35388_workaround) { EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS > (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS < (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, ERF_DD_EVQ_IND_RPTR, (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, ERF_DD_EVQ_IND_RPTR, rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); } else { EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); } return (0); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_driver_event( __in efx_nic_t *enp, __in uint32_t evq, __in efx_qword_t data) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN, MC_CMD_DRIVER_EVENT_OUT_LEN)]; - int rc; + efx_rc_t rc; req.emr_cmd = MC_CMD_DRIVER_EVENT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN; MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO, EFX_QWORD_FIELD(data, EFX_DWORD_0)); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI, EFX_QWORD_FIELD(data, EFX_DWORD_1)); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t event; EFX_POPULATE_QWORD_3(event, ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV, ESF_DZ_DRV_SUB_CODE, 0, ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data); (void) efx_mcdi_driver_event(enp, eep->ee_index, event); } - __checkReturn int + __checkReturn efx_rc_t hunt_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_dword_t dword; uint32_t timer_val, mode; - int rc; + efx_rc_t rc; if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { timer_val = 0; mode = FFE_CZ_TIMER_MODE_DIS; } else { /* Calculate the timer value in quanta */ timer_val = us * 1000 / encp->enc_evq_timer_quantum_ns; /* Moderation value is base 0 so we need to deduct 1 */ if (timer_val > 0) timer_val--; mode = FFE_CZ_TIMER_MODE_INT_HLDOFF; } if (encp->enc_bug35388_workaround) { EFX_POPULATE_DWORD_3(dword, ERF_DD_EVQ_IND_TIMER_FLAGS, EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, ERF_DD_EVQ_IND_TIMER_VAL, timer_val); EFX_BAR_TBL_WRITED(enp, 
ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, 0); } else { EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, mode, FRF_CZ_TC_TIMER_VAL, timer_val); EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0, eep->ee_index, &dword, 0); } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_QSTATS void hunt_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { /* * TBD: Consider a common Siena/Huntington function. The code is * essentially identical. */ unsigned int id; for (id = 0; id < EV_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, eep->ee_stat[id]); eep->ee_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ static __checkReturn boolean_t hunt_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t size; boolean_t parse_err; uint32_t label; uint32_t mcast; uint32_t eth_base_class; uint32_t eth_tag_class; uint32_t l3_class; uint32_t l4_class; uint32_t next_read_lbits; boolean_t soft1, soft2; uint16_t flags; boolean_t should_abort; efx_evq_rxq_state_t *eersp; unsigned int desc_count; unsigned int last_used_id; EFX_EV_QSTAT_INCR(eep, EV_RX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); /* * FIXME: likely to be incomplete, incorrect and inefficient. * Improvements in all three areas are required. */ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } flags = 0; size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT) != 0) { /* * FIXME: There is not yet any driver that supports scatter on * Huntington. Scatter support is required for OSX. */ EFSYS_ASSERT(0); flags |= EFX_PKT_CONT; } parse_err = (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE) != 0); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { /* Ethernet frame CRC bad */ flags |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CRC0_ERR) != 0) { /* IP+TCP, bad CRC in iSCSI header */ flags |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CRC1_ERR) != 0) { /* IP+TCP, bad CRC in iSCSI payload or FCoE or FCoIP */ flags |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { /* ECC memory error */ flags |= EFX_DISCARD; } /* FIXME: do we need soft bits from RXDP firmware ? */ soft1 = (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_SOFT1) != 0); soft2 = (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_SOFT2) != 0); mcast = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); if (mcast == ESE_DZ_MAC_CLASS_UCAST) flags |= EFX_PKT_UNICAST; eth_base_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_BASE_CLASS); eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS); /* bottom 4 bits of incremented index (not last desc consumed) */ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); /* Increment the count of descriptors read */ eersp = &eep->ee_rxq_state[label]; desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) & EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); eersp->eers_rx_read_ptr += desc_count; /* * FIXME: add error checking to make sure this a batched event. * This could also be an aborted scatter, see Bug36629. 
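The event only carries the low ESF_DZ_RX_DSC_PTR_LBITS bits of the read pointer, so hunt_ev_rx() above recovers the consumed-descriptor count with a masked subtraction that stays correct across wrap-around. A self-contained sketch of that arithmetic; the field width here is illustrative, not the real register width:

#include <stdint.h>

#define	SKETCH_LBITS_WIDTH	4	/* illustrative field width */
#define	SKETCH_LBITS_MASK	((1u << SKETCH_LBITS_WIDTH) - 1)

static unsigned int
rx_desc_count(uint32_t next_read_lbits, uint32_t prev_read_ptr)
{
	/*
	 * Unsigned subtraction modulo 2^width: e.g. with a 4-bit field,
	 * next = 0x2 and prev = 0xe gives (0x2 - 0xe) & 0xf = 4, i.e. four
	 * descriptors consumed even though the truncated pointer wrapped.
	 */
	return ((next_read_lbits - prev_read_ptr) & SKETCH_LBITS_MASK);
}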
*/ if (desc_count > 1) { EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); flags |= EFX_PKT_PREFIX_LEN; } /* Calculate the index of the the last descriptor consumed */ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; /* EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_OVERRIDE_HOLDOFF); */ switch (eth_base_class) { case ESE_DZ_ETH_BASE_CLASS_LLC_SNAP: case ESE_DZ_ETH_BASE_CLASS_LLC: case ESE_DZ_ETH_BASE_CLASS_ETH2: default: break; } switch (eth_tag_class) { case ESE_DZ_ETH_TAG_CLASS_RSVD7: case ESE_DZ_ETH_TAG_CLASS_RSVD6: case ESE_DZ_ETH_TAG_CLASS_RSVD5: case ESE_DZ_ETH_TAG_CLASS_RSVD4: break; case ESE_DZ_ETH_TAG_CLASS_RSVD3: /* Triple tagged */ case ESE_DZ_ETH_TAG_CLASS_VLAN2: /* Double tagged */ case ESE_DZ_ETH_TAG_CLASS_VLAN1: /* VLAN tagged */ flags |= EFX_PKT_VLAN_TAGGED; break; case ESE_DZ_ETH_TAG_CLASS_NONE: default: break; } switch (l3_class) { case ESE_DZ_L3_CLASS_RSVD7: /* Used by firmware for packet overrun */ parse_err = B_TRUE; flags |= EFX_DISCARD; break; case ESE_DZ_L3_CLASS_ARP: case ESE_DZ_L3_CLASS_FCOE: break; case ESE_DZ_L3_CLASS_IP6_FRAG: case ESE_DZ_L3_CLASS_IP6: flags |= EFX_PKT_IPV6; break; case ESE_DZ_L3_CLASS_IP4_FRAG: case ESE_DZ_L3_CLASS_IP4: flags |= EFX_PKT_IPV4; if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR) == 0) flags |= EFX_CKSUM_IPV4; break; case ESE_DZ_L3_CLASS_UNKNOWN: default: break; } switch (l4_class) { case ESE_DZ_L4_CLASS_RSVD7: case ESE_DZ_L4_CLASS_RSVD6: case ESE_DZ_L4_CLASS_RSVD5: case ESE_DZ_L4_CLASS_RSVD4: case ESE_DZ_L4_CLASS_RSVD3: break; case ESE_DZ_L4_CLASS_UDP: flags |= EFX_PKT_UDP; if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR) == 0) flags |= EFX_CKSUM_TCPUDP; break; case ESE_DZ_L4_CLASS_TCP: flags |= EFX_PKT_TCP; if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR) == 0) flags |= EFX_CKSUM_TCPUDP; break; case ESE_DZ_L4_CLASS_UNKNOWN: default: break; } /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags); return (should_abort); } static __checkReturn boolean_t hunt_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } static __checkReturn boolean_t hunt_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { unsigned int code; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE); switch (code) { case ESE_DZ_DRV_TIMER_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID); EFSYS_ASSERT(eecp->eec_timer != NULL); should_abort = eecp->eec_timer(arg, id); break; } case ESE_DZ_DRV_WAKE_UP_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID); EFSYS_ASSERT(eecp->eec_wake_up != NULL); should_abort = 
eecp->eec_wake_up(arg, id); break; } case ESE_DZ_DRV_START_UP_EV: EFSYS_ASSERT(eecp->eec_initialized != NULL); should_abort = eecp->eec_initialized(arg); break; default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } static __checkReturn boolean_t hunt_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t data; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); should_abort = B_FALSE; data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0); if (data >= ((uint32_t)1 << 16)) { EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); return (B_TRUE); } EFSYS_ASSERT(eecp->eec_software != NULL); should_abort = eecp->eec_software(arg, (uint16_t)data); return (should_abort); } static __checkReturn boolean_t hunt_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; unsigned code; boolean_t should_abort = B_FALSE; EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); switch (code) { case MCDI_EVENT_CODE_BADSSERT: efx_mcdi_ev_death(enp, EINTR); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(enp, MCDI_EV_FIELD(eqp, CMDDONE_SEQ), MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); break; case MCDI_EVENT_CODE_LINKCHANGE: { efx_link_mode_t link_mode; hunt_phy_link_ev(enp, eqp, &link_mode); should_abort = eecp->eec_link_change(arg, link_mode); break; } case MCDI_EVENT_CODE_SENSOREVT: { #if EFSYS_OPT_MON_STATS efx_mon_stat_t id; efx_mon_stat_value_t value; - int rc; + efx_rc_t rc; /* Decode monitor stat for MCDI sensor (if supported) */ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) { /* Report monitor stat change */ should_abort = eecp->eec_monitor(arg, id, value); } else if (rc == ENOTSUP) { should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_SENSOREVT, MCDI_EV_FIELD(eqp, DATA)); } else { EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ } #endif break; } case MCDI_EVENT_CODE_SCHEDERR: /* Informational only */ break; case MCDI_EVENT_CODE_REBOOT: /* Falcon/Siena only (should not been seen with Huntington). */ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MC_REBOOT: /* MC_REBOOT event is used for Huntington (EF10) and later. */ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif break; case MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } case MCDI_EVENT_CODE_TX_ERR: { /* * After a TXQ error is detected, firmware sends a TX_ERR event. * This may be followed by TX completions (which we discard), * and then finally by a TX_FLUSH event. Firmware destroys the * TXQ automatically after sending the TX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_TXQ_ERR; EFSYS_PROBE1(tx_descq_err, uint32_t, MCDI_EV_FIELD(eqp, DATA)); /* Inform the driver that a reset is required. 
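The EFX_RESET_TXQ_ERR flag latched here is what makes the early returns at the top of hunt_ev_rx() and hunt_ev_tx() discard the completions that firmware may still deliver between the TX_ERR and TX_FLUSH events. A minimal sketch of that gating, assuming the efx_impl.h types; the deliver callback is hypothetical:

static boolean_t
sketch_gated_tx_event(efx_nic_t *enp, boolean_t (*deliver)(void))
{
	/* Once a queue error has been latched, drop later completions. */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	return (deliver());
}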
*/ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR, MCDI_EV_FIELD(eqp, TX_ERR_DATA)); break; } case MCDI_EVENT_CODE_TX_FLUSH: { uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ); /* * EF10 firmware sends two TX_FLUSH events: one to the txq's * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with TX_FLUSH_TO_DRIVER. */ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case MCDI_EVENT_CODE_RX_ERR: { /* * After an RXQ error is detected, firmware sends an RX_ERR * event. This may be followed by RX events (which we discard), * and then finally by an RX_FLUSH event. Firmware destroys the * RXQ automatically after sending the RX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_RXQ_ERR; EFSYS_PROBE1(rx_descq_err, uint32_t, MCDI_EV_FIELD(eqp, DATA)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR, MCDI_EV_FIELD(eqp, RX_ERR_DATA)); break; } case MCDI_EVENT_CODE_RX_FLUSH: { uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ); /* * EF10 firmware sends two RX_FLUSH events: one to the rxq's * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with RX_FLUSH_TO_DRIVER. */ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); break; } default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } void hunt_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label) { efx_evq_rxq_state_t *eersp; EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = erp->er_mask; } void hunt_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label) { efx_evq_rxq_state_t *eersp; EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0); eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = 0; } #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_filter.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_filter.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_filter.c (revision 293927) @@ -1,1376 +1,1376 @@ /*- * Copyright (c) 2007-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs_mcdi.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON #if EFSYS_OPT_FILTER #define HFE_SPEC(hftp, index) ((hftp)->hft_entry[(index)].hfe_spec) static efx_filter_spec_t * hunt_filter_entry_spec( __in const hunt_filter_table_t *hftp, __in unsigned int index) { return ((efx_filter_spec_t *)(HFE_SPEC(hftp, index) & ~(uintptr_t)EFX_HUNT_FILTER_FLAGS)); } static boolean_t hunt_filter_entry_is_busy( __in const hunt_filter_table_t *hftp, __in unsigned int index) { if (HFE_SPEC(hftp, index) & EFX_HUNT_FILTER_FLAG_BUSY) return (B_TRUE); else return (B_FALSE); } static boolean_t hunt_filter_entry_is_auto_old( __in const hunt_filter_table_t *hftp, __in unsigned int index) { if (HFE_SPEC(hftp, index) & EFX_HUNT_FILTER_FLAG_AUTO_OLD) return (B_TRUE); else return (B_FALSE); } static void hunt_filter_set_entry( __inout hunt_filter_table_t *hftp, __in unsigned int index, __in_opt const efx_filter_spec_t *efsp) { HFE_SPEC(hftp, index) = (uintptr_t)efsp; } static void hunt_filter_set_entry_busy( __inout hunt_filter_table_t *hftp, __in unsigned int index) { HFE_SPEC(hftp, index) |= (uintptr_t)EFX_HUNT_FILTER_FLAG_BUSY; } static void hunt_filter_set_entry_not_busy( __inout hunt_filter_table_t *hftp, __in unsigned int index) { HFE_SPEC(hftp, index) &= ~(uintptr_t)EFX_HUNT_FILTER_FLAG_BUSY; } static void hunt_filter_set_entry_auto_old( __inout hunt_filter_table_t *hftp, __in unsigned int index) { EFSYS_ASSERT(hunt_filter_entry_spec(hftp, index) != NULL); HFE_SPEC(hftp, index) |= (uintptr_t)EFX_HUNT_FILTER_FLAG_AUTO_OLD; } static void hunt_filter_set_entry_not_auto_old( __inout hunt_filter_table_t *hftp, __in unsigned int index) { HFE_SPEC(hftp, index) &= ~(uintptr_t)EFX_HUNT_FILTER_FLAG_AUTO_OLD; EFSYS_ASSERT(hunt_filter_entry_spec(hftp, index) != NULL); } - __checkReturn int + __checkReturn efx_rc_t hunt_filter_init( __in efx_nic_t *enp) { - int rc; + efx_rc_t rc; hunt_filter_table_t *hftp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); #define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match)) EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT)); 
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO == MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO)); #undef MATCH_MASK EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (hunt_filter_table_t), hftp); if (!hftp) { rc = ENOMEM; goto fail1; } enp->en_filter.ef_hunt_filter_table = hftp; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_filter_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); if (enp->en_filter.ef_hunt_filter_table != NULL) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (hunt_filter_table_t), enp->en_filter.ef_hunt_filter_table); } } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_filter_op_add( __in efx_nic_t *enp, __in efx_filter_spec_t *spec, __in unsigned int filter_op, __inout hunt_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN, MC_CMD_FILTER_OP_OUT_LEN)]; uint32_t match_fields = 0; - int rc; + efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REPLACE: MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->hfh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->hfh_hi); /* Fall through */ case MC_CMD_FILTER_OP_IN_OP_INSERT: case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } if (spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { /* * The LOC_MAC_IG match flag can represent unknown unicast * or multicast filters - use the MAC address to distinguish * them. */ if (EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac)) match_fields |= 1U << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN; else match_fields |= 1U << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; } match_fields |= spec->efs_match_flags & (~EFX_FILTER_MATCH_LOC_MAC_IG); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS, match_fields); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST, MC_CMD_FILTER_OP_IN_RX_DEST_HOST); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE, spec->efs_dmaq_id); if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) { MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT, spec->efs_rss_context); } MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE, spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ? MC_CMD_FILTER_OP_IN_RX_MODE_RSS : MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST, MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) { /* * NOTE: Unlike most MCDI requests, the filter fields * are presented in network (big endian) byte order. 
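The NOTE above deserves a concrete illustration: the 16-bit filter fields are stored most-significant byte first, which is what the __CPU_TO_BE_16() conversions below achieve. A standalone sketch that writes a value in network byte order; put_be16() is a hypothetical helper, not a driver function:

#include <stdint.h>

static void
put_be16(uint8_t *dst, uint16_t v)
{
	dst[0] = (uint8_t)(v >> 8);	/* most significant byte first */
	dst[1] = (uint8_t)(v & 0xff);
}

/*
 * Example: put_be16(buf, 80) stores { 0x00, 0x50 }, the on-the-wire form
 * of TCP port 80, regardless of host endianness.
 */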
*/ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC), spec->efs_rem_mac, EFX_MAC_ADDR_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC), spec->efs_loc_mac, EFX_MAC_ADDR_LEN); MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT, __CPU_TO_BE_16(spec->efs_rem_port)); MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT, __CPU_TO_BE_16(spec->efs_loc_port)); MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE, __CPU_TO_BE_16(spec->efs_ether_type)); MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN, __CPU_TO_BE_16(spec->efs_inner_vid)); MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN, __CPU_TO_BE_16(spec->efs_outer_vid)); /* IP protocol (in low byte, high byte is zero) */ MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO, spec->efs_ip_proto); EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) == MC_CMD_FILTER_OP_IN_SRC_IP_LEN); EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) == MC_CMD_FILTER_OP_IN_DST_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP), &spec->efs_rem_host.eo_byte[0], MC_CMD_FILTER_OP_IN_SRC_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP), &spec->efs_loc_host.eo_byte[0], MC_CMD_FILTER_OP_IN_DST_IP_LEN); } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) { rc = EMSGSIZE; goto fail3; } handle->hfh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO); handle->hfh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_filter_op_delete( __in efx_nic_t *enp, __in unsigned int filter_op, __inout hunt_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN, MC_CMD_FILTER_OP_OUT_LEN)]; - int rc; + efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REMOVE: MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_REMOVE); break; case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->hfh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->hfh_hi); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) { rc = EMSGSIZE; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn boolean_t hunt_filter_equal( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { /* FIXME: Consider rx vs tx filters (look at efs_flags) */ if (left->efs_match_flags != right->efs_match_flags) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host)) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host)) return (B_FALSE); if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (left->efs_rem_port != right->efs_rem_port) return (B_FALSE); if (left->efs_loc_port != 
right->efs_loc_port) return (B_FALSE); if (left->efs_inner_vid != right->efs_inner_vid) return (B_FALSE); if (left->efs_outer_vid != right->efs_outer_vid) return (B_FALSE); if (left->efs_ether_type != right->efs_ether_type) return (B_FALSE); if (left->efs_ip_proto != right->efs_ip_proto) return (B_FALSE); return (B_TRUE); } static __checkReturn boolean_t hunt_filter_same_dest( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) && (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_rss_context == right->efs_rss_context) return (B_TRUE); } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) && (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_dmaq_id == right->efs_dmaq_id) return (B_TRUE); } return (B_FALSE); } static __checkReturn uint32_t hunt_filter_hash( __in efx_filter_spec_t *spec) { EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t)) == 0); EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) % sizeof (uint32_t)) == 0); /* * As the area of the efx_filter_spec_t we need to hash is DWORD * aligned and an exact number of DWORDs in size we can use the * optimised efx_hash_dwords() rather than efx_hash_bytes() */ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid, (sizeof (efx_filter_spec_t) - EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) / sizeof (uint32_t), 0)); } /* * Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. Currently we decide that * filters for specific local unicast MAC and IP addresses are * exclusive. */ static __checkReturn boolean_t hunt_filter_is_exclusive( __in efx_filter_spec_t *spec) { if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) && !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac)) return (B_TRUE); if ((spec->efs_match_flags & (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) && ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe)) return (B_TRUE); if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) && (spec->efs_loc_host.eo_u8[0] != 0xff)) return (B_TRUE); } return (B_FALSE); } - __checkReturn int + __checkReturn efx_rc_t hunt_filter_restore( __in efx_nic_t *enp) { int tbl_id; efx_filter_spec_t *spec; hunt_filter_table_t *hftp = enp->en_filter.ef_hunt_filter_table; boolean_t restoring; int state; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); for (tbl_id = 0; tbl_id < EFX_HUNT_FILTER_TBL_ROWS; tbl_id++) { EFSYS_LOCK(enp->en_eslp, state); spec = hunt_filter_entry_spec(hftp, tbl_id); if (spec == NULL) { restoring = B_FALSE; } else if (hunt_filter_entry_is_busy(hftp, tbl_id)) { /* Ignore busy entries. 
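The busy test used in this loop comes from the HFE_SPEC() helpers earlier in the file, which keep the BUSY and AUTO_OLD flags in the low bits of each entry's spec pointer. A standalone sketch of that pointer-tagging scheme; the flag values and helper names are illustrative:

#include <stdint.h>

#define	SKETCH_FLAG_BUSY	0x1u	/* assumes specs are at least 4-byte aligned */
#define	SKETCH_FLAG_AUTO_OLD	0x2u
#define	SKETCH_FLAGS		(SKETCH_FLAG_BUSY | SKETCH_FLAG_AUTO_OLD)

static void *
entry_spec(uintptr_t entry)
{
	/* Mask off the tag bits to recover the original, aligned pointer. */
	return ((void *)(entry & ~(uintptr_t)SKETCH_FLAGS));
}

static uintptr_t
entry_set_busy(uintptr_t entry)
{
	return (entry | SKETCH_FLAG_BUSY);
}

static int
entry_is_busy(uintptr_t entry)
{
	return ((entry & SKETCH_FLAG_BUSY) != 0);
}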
*/ restoring = B_FALSE; } else { hunt_filter_set_entry_busy(hftp, tbl_id); restoring = B_TRUE; } EFSYS_UNLOCK(enp->en_eslp, state); if (restoring == B_FALSE) continue; if (hunt_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &hftp->hft_entry[tbl_id].hfe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &hftp->hft_entry[tbl_id].hfe_handle); } if (rc != 0) goto fail1; EFSYS_LOCK(enp->en_eslp, state); hunt_filter_set_entry_not_busy(hftp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * An arbitrary search limit for the software hash table. As per the linux net * driver. */ #define EFX_HUNT_FILTER_SEARCH_LIMIT 200 -static __checkReturn int +static __checkReturn efx_rc_t hunt_filter_add_internal( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace, __out_opt uint32_t *filter_id) { - int rc; + efx_rc_t rc; hunt_filter_table_t *hftp = enp->en_filter.ef_hunt_filter_table; efx_filter_spec_t *saved_spec; uint32_t hash; unsigned int depth; int ins_index; boolean_t replacing = B_FALSE; unsigned int i; int state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); #if EFSYS_OPT_RX_SCALE spec->efs_rss_context = enp->en_rss_context; #endif hash = hunt_filter_hash(spec); /* * FIXME: Add support for inserting filters of different priorities * and removing lower priority multicast filters (bug 42378) */ /* * Find any existing filters with the same match tuple or * else a free slot to insert at. If any of them are busy, * we have to wait and retry. */ for (;;) { ins_index = -1; depth = 1; EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; for (;;) { i = (hash + depth) & (EFX_HUNT_FILTER_TBL_ROWS - 1); saved_spec = hunt_filter_entry_spec(hftp, i); if (!saved_spec) { if (ins_index < 0) { ins_index = i; } } else if (hunt_filter_equal(spec, saved_spec)) { if (hunt_filter_entry_is_busy(hftp, i)) break; if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) { ins_index = i; goto found; } else if (hunt_filter_is_exclusive(spec)) { if (may_replace) { ins_index = i; goto found; } else { rc = EEXIST; goto fail1; } } /* Leave existing */ } /* * Once we reach the maximum search depth, use * the first suitable slot or return EBUSY if * there was none. */ if (depth == EFX_HUNT_FILTER_SEARCH_LIMIT) { if (ins_index < 0) { rc = EBUSY; goto fail2; } goto found; } depth++; } EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; } found: /* * Create a software table entry if necessary, and mark it * busy. We might yet fail to insert, but any attempt to * insert a conflicting filter while we're waiting for the * firmware must find the busy entry. */ saved_spec = hunt_filter_entry_spec(hftp, ins_index); if (saved_spec) { if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) { /* This is a filter we are refreshing */ hunt_filter_set_entry_not_auto_old(hftp, ins_index); goto out_unlock; } replacing = B_TRUE; } else { EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec); if (!saved_spec) { rc = ENOMEM; goto fail3; } *saved_spec = *spec; hunt_filter_set_entry(hftp, ins_index, saved_spec); } hunt_filter_set_entry_busy(hftp, ins_index); EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; /* * On replacing the filter handle may change after after a successful * replace operation. 
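The insertion loop above is an open-addressed hash probe with a fixed search limit (EFX_HUNT_FILTER_SEARCH_LIMIT, 200) rather than a full table walk. The sketch below shows only the free-slot search and omits the duplicate-match and replace handling; the table size is illustrative, standing in for EFX_HUNT_FILTER_TBL_ROWS:

#define	SKETCH_TBL_ROWS		8192	/* illustrative; must be a power of two */
#define	SKETCH_SEARCH_LIMIT	200

/* Returns a free slot index, or -1 if none is found within the limit. */
static int
probe_for_slot(void *const *table, unsigned int hash)
{
	unsigned int depth;
	unsigned int i;

	for (depth = 1; depth <= SKETCH_SEARCH_LIMIT; depth++) {
		i = (hash + depth) & (SKETCH_TBL_ROWS - 1);
		if (table[i] == NULL)
			return ((int)i);
	}
	return (-1);		/* caller maps this to EBUSY */
}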
*/ if (replacing) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_REPLACE, &hftp->hft_entry[ins_index].hfe_handle); } else if (hunt_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &hftp->hft_entry[ins_index].hfe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &hftp->hft_entry[ins_index].hfe_handle); } if (rc != 0) goto fail4; EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; if (replacing) { /* Update the fields that may differ */ saved_spec->efs_priority = spec->efs_priority; saved_spec->efs_flags = spec->efs_flags; saved_spec->efs_rss_context = spec->efs_rss_context; saved_spec->efs_dmaq_id = spec->efs_dmaq_id; } hunt_filter_set_entry_not_busy(hftp, ins_index); out_unlock: EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; if (filter_id) *filter_id = ins_index; return (0); fail4: EFSYS_PROBE(fail4); if (!replacing) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec); saved_spec = NULL; } hunt_filter_set_entry_not_busy(hftp, ins_index); hunt_filter_set_entry(hftp, ins_index, NULL); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace) { - int rc; + efx_rc_t rc; rc = hunt_filter_add_internal(enp, spec, may_replace, NULL); if (rc != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t hunt_filter_delete_internal( __in efx_nic_t *enp, __in uint32_t filter_id) { - int rc; + efx_rc_t rc; hunt_filter_table_t *table = enp->en_filter.ef_hunt_filter_table; efx_filter_spec_t *spec; int state; uint32_t filter_idx = filter_id % EFX_HUNT_FILTER_TBL_ROWS; /* * Find the software table entry and mark it busy. Don't * remove it yet; any attempt to update while we're waiting * for the firmware must find the busy entry. * * FIXME: What if the busy flag is never cleared? */ EFSYS_LOCK(enp->en_eslp, state); while (hunt_filter_entry_is_busy(table, filter_idx)) { EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_SPIN(1); EFSYS_LOCK(enp->en_eslp, state); } if ((spec = hunt_filter_entry_spec(table, filter_idx)) != NULL) { hunt_filter_set_entry_busy(table, filter_idx); } EFSYS_UNLOCK(enp->en_eslp, state); if (spec == NULL) { rc = ENOENT; goto fail1; } /* * Try to remove the hardware filter. This may fail if the MC has * rebooted (which frees all hardware filter resources). 
*/ if (hunt_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_REMOVE, &table->hft_entry[filter_idx].hfe_handle); } else { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE, &table->hft_entry[filter_idx].hfe_handle); } /* Free the software table entry */ EFSYS_LOCK(enp->en_eslp, state); hunt_filter_set_entry_not_busy(table, filter_idx); hunt_filter_set_entry(table, filter_idx, NULL); EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec); /* Check result of hardware filter removal */ if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { - int rc; + efx_rc_t rc; hunt_filter_table_t *table = enp->en_filter.ef_hunt_filter_table; efx_filter_spec_t *saved_spec; unsigned int hash; unsigned int depth; unsigned int i; int state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); hash = hunt_filter_hash(spec); EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; depth = 1; for (;;) { i = (hash + depth) & (EFX_HUNT_FILTER_TBL_ROWS - 1); saved_spec = hunt_filter_entry_spec(table, i); if (saved_spec && hunt_filter_equal(spec, saved_spec) && hunt_filter_same_dest(spec, saved_spec)) { break; } if (depth == EFX_HUNT_FILTER_SEARCH_LIMIT) { rc = ENOENT; goto fail1; } depth++; } EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; rc = hunt_filter_delete_internal(enp, i); if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_get_parser_disp_info( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)]; - int rc; + efx_rc_t rc; uint32_t i; boolean_t support_unknown_ucast = B_FALSE; boolean_t support_unknown_mcast = B_FALSE; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } *length = MCDI_OUT_DWORD(req, GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES); if (req.emr_out_length_used < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(*length)) { rc = EMSGSIZE; goto fail2; } memcpy(list, MCDI_OUT2(req, uint32_t, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES), (*length) * sizeof (uint32_t)); EFX_STATIC_ASSERT(sizeof (uint32_t) == MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN); /* * Remove UNKNOWN UCAST and MCAST flags, and if both are present, change * the lower priority one to LOC_MAC_IG. 
*/ for (i = 0; i < *length; i++) { if (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN) { list[i] &= (~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN); support_unknown_ucast = B_TRUE; } if (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) { list[i] &= (~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN); support_unknown_mcast = B_TRUE; } if (support_unknown_ucast && support_unknown_mcast) { list[i] &= EFX_FILTER_MATCH_LOC_MAC_IG; break; } } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_get_parser_disp_info(enp, list, length) != 0)) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t hunt_filter_unicast_refresh( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *addr, __in boolean_t all_unicst, __in efx_filter_flag_t filter_flags) { hunt_filter_table_t *hftp = enp->en_filter.ef_hunt_filter_table; efx_filter_spec_t spec; - int rc; + efx_rc_t rc; if (all_unicst == B_TRUE) goto use_uc_def; /* Insert the filter for the local station address */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, hftp->hft_default_rxq); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); rc = hunt_filter_add_internal(enp, &spec, B_TRUE, &hftp->hft_unicst_filter_index); if (rc != 0) { /* * Fall back to an unknown filter. We may be able to subscribe * to it even if we couldn't insert the unicast filter. */ goto use_uc_def; } hftp->hft_unicst_filter_set = B_TRUE; return (0); use_uc_def: /* Insert the unknown unicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, hftp->hft_default_rxq); efx_filter_spec_set_uc_def(&spec); rc = hunt_filter_add_internal(enp, &spec, B_TRUE, &hftp->hft_unicst_filter_index); if (rc != 0) goto fail1; hftp->hft_unicst_filter_set = B_TRUE; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); if (hftp->hft_unicst_filter_set != B_FALSE) { (void) hunt_filter_delete_internal(enp, hftp->hft_unicst_filter_index); hftp->hft_unicst_filter_set = B_FALSE; } return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t hunt_filter_multicast_refresh( __in efx_nic_t *enp, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in int count, __in efx_filter_flag_t filter_flags) { hunt_filter_table_t *hftp = enp->en_filter.ef_hunt_filter_table; efx_filter_spec_t spec; uint8_t addr[6]; unsigned i; - int rc; + efx_rc_t rc; if (all_mulcst == B_TRUE) goto use_mc_def; if (mulcst == B_FALSE) count = 0; if (count + (brdcst ? 
1 : 0) > EFX_ARRAY_SIZE(hftp->hft_mulcst_filter_indexes)) { /* Too many MAC addresses; use unknown multicast filter */ goto use_mc_def; } /* Insert/renew multicast address list filters */ hftp->hft_mulcst_filter_count = count; for (i = 0; i < hftp->hft_mulcst_filter_count; i++) { efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, hftp->hft_default_rxq); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, &addrs[i * EFX_MAC_ADDR_LEN]); rc = hunt_filter_add_internal(enp, &spec, B_TRUE, &hftp->hft_mulcst_filter_indexes[i]); if (rc != 0) { /* Rollback, then use unknown multicast filter */ goto rollback; } } if (brdcst == B_TRUE) { /* Insert/renew broadcast address filter */ hftp->hft_mulcst_filter_count++; efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, hftp->hft_default_rxq); EFX_MAC_BROADCAST_ADDR_SET(addr); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); rc = hunt_filter_add_internal(enp, &spec, B_TRUE, &hftp->hft_mulcst_filter_indexes[ hftp->hft_mulcst_filter_count - 1]); if (rc != 0) { /* Rollback, then use unknown multicast filter */ goto rollback; } } return (0); rollback: /* * Rollback by removing any filters we have inserted * before inserting the unknown multicast filter. */ while (i--) { (void) hunt_filter_delete_internal(enp, hftp->hft_mulcst_filter_indexes[i]); } hftp->hft_mulcst_filter_count = 0; use_mc_def: /* Insert the unknown multicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, hftp->hft_default_rxq); efx_filter_spec_set_mc_def(&spec); rc = hunt_filter_add_internal(enp, &spec, B_TRUE, &hftp->hft_mulcst_filter_indexes[0]); if (rc != 0) goto fail1; hftp->hft_mulcst_filter_count = 1; /* * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic. */ return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t hunt_filter_get_workarounds( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; uint32_t implemented = 0; uint32_t enabled = 0; - int rc; + efx_rc_t rc; rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled); if (rc == 0) { /* Check if chained multicast filter support is enabled */ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807) encp->enc_bug26807_workaround = B_TRUE; else encp->enc_bug26807_workaround = B_FALSE; } else if (rc == ENOTSUP) { /* * Firmware is too old to support GET_WORKAROUNDS, and support * for this workaround was implemented later. */ encp->enc_bug26807_workaround = B_FALSE; } else { goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Reconfigure all filters. * If all_unicst and/or all mulcst filters cannot be applied then * return ENOTSUP (Note the filters for the specified addresses are * still applied in this case). */ - __checkReturn int + __checkReturn efx_rc_t hunt_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in int count) { hunt_filter_table_t *table = enp->en_filter.ef_hunt_filter_table; efx_filter_flag_t filter_flags; unsigned i; int all_unicst_rc; int all_mulcst_rc; - int rc; + efx_rc_t rc; if (table->hft_default_rxq == NULL) { /* * Filters direct traffic to the default RXQ, and so cannot be * inserted until it is available. 
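The rollback path in hunt_filter_multicast_refresh() above uses the compact while (i--) idiom to remove exactly the filters inserted before the failure. A self-contained sketch of that all-or-nothing insertion pattern; insert_one() and remove_one() are hypothetical callbacks:

static int
insert_all_or_none(unsigned int count,
    int (*insert_one)(unsigned int), void (*remove_one)(unsigned int))
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < count; i++) {
		if ((rc = insert_one(i)) != 0)
			break;
	}
	if (rc != 0) {
		/* i is the failing index; undo entries i-1 down to 0. */
		while (i--)
			remove_one(i);
	}
	return (rc);
}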
Any currently configured * filters must be removed (ignore errors in case the MC * has rebooted, which removes hardware filters). */ if (table->hft_unicst_filter_set != B_FALSE) { (void) hunt_filter_delete_internal(enp, table->hft_unicst_filter_index); table->hft_unicst_filter_set = B_FALSE; } for (i = 0; i < table->hft_mulcst_filter_count; i++) { (void) hunt_filter_delete_internal(enp, table->hft_mulcst_filter_indexes[i]); } table->hft_mulcst_filter_count = 0; return (0); } if (table->hft_using_rss) filter_flags = EFX_FILTER_FLAG_RX_RSS; else filter_flags = 0; /* Mark old filters which may need to be removed */ if (table->hft_unicst_filter_set != B_FALSE) { hunt_filter_set_entry_auto_old(table, table->hft_unicst_filter_index); } for (i = 0; i < table->hft_mulcst_filter_count; i++) { hunt_filter_set_entry_auto_old(table, table->hft_mulcst_filter_indexes[i]); } /* Insert or renew unicast filters */ if ((all_unicst_rc = hunt_filter_unicast_refresh(enp, mac_addr, all_unicst, filter_flags)) != 0) { if (all_unicst == B_FALSE) { rc = all_unicst_rc; goto fail1; } /* Retry without all_unicast flag */ rc = hunt_filter_unicast_refresh(enp, mac_addr, B_FALSE, filter_flags); if (rc != 0) goto fail2; } /* * WORKAROUND_BUG26807 controls firmware support for chained multicast * filters, and can only be enabled or disabled when the hardware filter * table is empty. * * Firmware will reset (FLR) functions which have inserted filters in * the hardware filter table when the workaround is enabled/disabled. * Functions without any hardware filters are not reset. * * Re-check if the workaround is enabled after adding unicast hardware * filters. This ensures that encp->enc_workaround_bug26807 matches the * firmware state, and that later changes to enable/disable the * workaround will result in this function seeing a reset (FLR). 
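The refresh sequence in this function is a mark-and-sweep: every current entry is flagged AUTO_OLD up front, re-inserting a wanted filter clears the flag, and the sweep further down deletes whatever is still flagged. A simplified sketch of that pattern; the entry type and the insert_wanted callback are illustrative:

#define	SKETCH_NENTRIES	64

struct sketch_entry {
	int	in_use;
	int	auto_old;
};

static void
refresh_filters(struct sketch_entry *tbl,
    void (*insert_wanted)(struct sketch_entry *))
{
	unsigned int i;

	/* 1. Mark every current entry as a removal candidate. */
	for (i = 0; i < SKETCH_NENTRIES; i++)
		if (tbl[i].in_use)
			tbl[i].auto_old = 1;

	/* 2. Insert or renew the wanted filters; renewal clears auto_old. */
	insert_wanted(tbl);

	/* 3. Sweep: anything still marked was not renewed, so drop it. */
	for (i = 0; i < SKETCH_NENTRIES; i++) {
		if (tbl[i].in_use && tbl[i].auto_old) {
			tbl[i].in_use = 0;
			tbl[i].auto_old = 0;
		}
	}
}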
*/ if ((rc = hunt_filter_get_workarounds(enp)) != 0) goto fail3; /* Insert or renew multicast filters */ if ((all_mulcst_rc = hunt_filter_multicast_refresh(enp, mulcst, all_mulcst, brdcst, addrs, count, filter_flags)) != 0) { if (all_mulcst == B_FALSE) { rc = all_mulcst_rc; goto fail4; } /* Retry without all_mulcast flag */ rc = hunt_filter_multicast_refresh(enp, mulcst, B_FALSE, brdcst, addrs, count, filter_flags); if (rc != 0) goto fail5; } /* Remove old filters which were not renewed */ for (i = 0; i < EFX_ARRAY_SIZE(table->hft_entry); i++) { if (hunt_filter_entry_is_auto_old(table, i)) { (void) hunt_filter_delete_internal(enp, i); } } /* report if any optional flags were rejected */ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) || ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) { rc = ENOTSUP; } return (rc); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Clear auto old flags */ for (i = 0; i < EFX_ARRAY_SIZE(table->hft_entry); i++) { if (hunt_filter_entry_is_auto_old(table, i)) { hunt_filter_set_entry_not_auto_old(table, i); } } return (rc); } void hunt_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss) { hunt_filter_table_t *table = enp->en_filter.ef_hunt_filter_table; *erpp = table->hft_default_rxq; *using_rss = table->hft_using_rss; } void hunt_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { hunt_filter_table_t *table = enp->en_filter.ef_hunt_filter_table; #if EFSYS_OPT_RX_SCALE EFSYS_ASSERT((using_rss == B_FALSE) || (enp->en_rss_context != HUNTINGTON_RSS_CONTEXT_INVALID)); table->hft_using_rss = using_rss; #else EFSYS_ASSERT(using_rss == B_FALSE); table->hft_using_rss = B_FALSE; #endif table->hft_default_rxq = erp; } void hunt_filter_default_rxq_clear( __in efx_nic_t *enp) { hunt_filter_table_t *table = enp->en_filter.ef_hunt_filter_table; table->hft_default_rxq = NULL; table->hft_using_rss = B_FALSE; } #endif /* EFSYS_OPT_FILTER */ #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_impl.h =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_impl.h (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_impl.h (revision 293927) @@ -1,1039 +1,1039 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_HUNT_IMPL_H #define _SYS_HUNT_IMPL_H #include "efx.h" #include "efx_regs.h" #include "efx_regs_ef10.h" #include "efx_mcdi.h" #ifdef __cplusplus extern "C" { #endif #define HUNTINGTON_NVRAM_CHUNK 0x80 /* Alignment requirement for value written to RX WPTR: * the WPTR must be aligned to an 8 descriptor boundary */ #define HUNTINGTON_RX_WPTR_ALIGN 8 /* Invalid RSS context handle */ #define HUNTINGTON_RSS_CONTEXT_INVALID (0xffffffff) /* EV */ - __checkReturn int + __checkReturn efx_rc_t hunt_ev_init( __in efx_nic_t *enp); void hunt_ev_fini( __in efx_nic_t *enp); - __checkReturn int + __checkReturn efx_rc_t hunt_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep); void hunt_ev_qdestroy( __in efx_evq_t *eep); - __checkReturn int + __checkReturn efx_rc_t hunt_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); void hunt_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); - __checkReturn int + __checkReturn efx_rc_t hunt_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); #if EFSYS_OPT_QSTATS void hunt_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ void hunt_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label); void hunt_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label); /* INTR */ - __checkReturn int + __checkReturn efx_rc_t hunt_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp); void hunt_intr_enable( __in efx_nic_t *enp); void hunt_intr_disable( __in efx_nic_t *enp); void hunt_intr_disable_unlocked( __in efx_nic_t *enp); - __checkReturn int + __checkReturn efx_rc_t hunt_intr_trigger( __in efx_nic_t *enp, __in unsigned int level); void hunt_intr_fini( __in efx_nic_t *enp); /* NIC */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_probe( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *vi_countp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_reset( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void hunt_nic_fini( __in efx_nic_t *enp); 
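The prototype changes throughout this header track the error-handling idiom that the .c hunks switch over to efx_rc_t: a local rc, numbered fail labels unwound in reverse order, and an EFSYS_PROBE1(fail1, efx_rc_t, rc) on the outermost label. A compilable sketch of that shape, with stand-in definitions for efx_rc_t and the probe macros (the real ones come from efx.h and efsys.h):

#include <errno.h>
#include <stdio.h>

typedef int efx_rc_t;			/* stand-in; the real typedef is in efx.h */

#define	EFSYS_PROBE(_name)		(void)0
#define	EFSYS_PROBE1(_name, _type, _arg) \
	fprintf(stderr, "probe %s: %d\n", #_name, (int)(_arg))

static efx_rc_t
sketch_step_one(void)
{
	return (0);
}

static efx_rc_t
sketch_step_two(void)
{
	return (EINVAL);		/* force the failure path for the example */
}

static efx_rc_t
sketch_do_something(void)
{
	efx_rc_t rc;

	if ((rc = sketch_step_one()) != 0)
		goto fail1;

	if ((rc = sketch_step_two()) != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

int
main(void)
{
	(void)sketch_do_something();	/* exercises the fail2 -> fail1 path */
	return (0);
}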
extern void hunt_nic_unprobe( __in efx_nic_t *enp); /* MAC */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mac_addr_set( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mac_reconfigure( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mac_multicast_list_set( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void hunt_mac_filter_default_rxq_clear( __in efx_nic_t *enp); #if EFSYS_OPT_LOOPBACK -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type); #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ /* MCDI */ #if EFSYS_OPT_MCDI -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); extern void hunt_mcdi_fini( __in efx_nic_t *enp); extern void hunt_mcdi_request_copyin( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in unsigned int seq, __in boolean_t ev_cpl, __in boolean_t new_epoch); extern __checkReturn boolean_t hunt_mcdi_request_poll( __in efx_nic_t *enp); extern void hunt_mcdi_request_copyout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp); -extern int +extern efx_rc_t hunt_mcdi_poll_reboot( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mcdi_fw_update_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_mcdi_macaddr_change_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); #endif /* EFSYS_OPT_MCDI */ /* NVRAM */ #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_buf_write_tlv( __inout_bcount(partn_size) caddr_t partn_data, __in size_t partn_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_partn_write_segment_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t all_segments); -extern __checkReturn int +extern __checkReturn efx_rc_t 
hunt_nvram_partn_size( __in efx_nic_t *enp, __in unsigned int partn, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_partn_lock( __in efx_nic_t *enp, __in unsigned int partn); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_partn_read( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_partn_erase( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_partn_write( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern void hunt_nvram_partn_unlock( __in efx_nic_t *enp, __in unsigned int partn); #endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ #if EFSYS_OPT_NVRAM #if EFSYS_OPT_DIAG -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_size( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_get_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_rw_start( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *pref_chunkp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_read_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_erase( __in efx_nic_t *enp, __in efx_nvram_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_write_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __in_bcount(size) caddr_t data, __in size_t size); extern void hunt_nvram_rw_finish( __in efx_nic_t *enp, __in efx_nvram_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_partn_set_version( __in efx_nic_t *enp, __in unsigned int partn, __in_ecount(4) uint16_t version[4]); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nvram_set_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_ecount(4) uint16_t version[4]); #endif /* EFSYS_OPT_NVRAM */ /* PHY */ typedef struct hunt_link_state_s { uint32_t hls_adv_cap_mask; uint32_t hls_lp_cap_mask; unsigned int hls_fcntl; efx_link_mode_t hls_link_mode; #if EFSYS_OPT_LOOPBACK efx_loopback_type_t hls_loopback; #endif boolean_t hls_mac_up; } hunt_link_state_t; extern void hunt_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_get_link( __in efx_nic_t *enp, __out hunt_link_state_t *hlsp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_power( __in efx_nic_t *enp, __in boolean_t on); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_reconfigure( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_verify( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); #if EFSYS_OPT_PHY_STATS -extern __checkReturn int 
+extern __checkReturn efx_rc_t hunt_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES extern const char * hunt_phy_prop_name( __in efx_nic_t *enp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_prop_get( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t flags, __out uint32_t *valp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_prop_set( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t val); #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_bist_enable_offline( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count); extern void hunt_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ /* SRAM */ #if EFSYS_OPT_DIAG -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_sram_test( __in efx_nic_t *enp, __in efx_sram_pattern_fn_t func); #endif /* EFSYS_OPT_DIAG */ /* TX */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_init( __in efx_nic_t *enp); extern void hunt_tx_fini( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp); extern void hunt_tx_qdestroy( __in efx_txq_t *etp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void hunt_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_qflush( __in efx_txq_t *etp); extern void hunt_tx_qenable( __in efx_txq_t *etp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_qpio_enable( __in efx_txq_t *etp); extern void hunt_tx_qpio_disable( __in efx_txq_t *etp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void hunt_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); extern void hunt_tx_qdesc_tso_create( __in efx_txq_t *etp, __in 
uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp); extern void hunt_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t vlan_tci, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS extern void hunt_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ /* PIO */ /* Missing register definitions */ #ifndef ER_DZ_TX_PIOBUF_OFST #define ER_DZ_TX_PIOBUF_OFST 0x00001000 #endif #ifndef ER_DZ_TX_PIOBUF_STEP #define ER_DZ_TX_PIOBUF_STEP 8192 #endif #ifndef ER_DZ_TX_PIOBUF_ROWS #define ER_DZ_TX_PIOBUF_ROWS 2048 #endif #ifndef ER_DZ_TX_PIOBUF_SIZE #define ER_DZ_TX_PIOBUF_SIZE 2048 #endif #define HUNT_PIOBUF_NBUFS (16) #define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE) #define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32) #define HUNT_LEGACY_PF_PRIVILEGE_MASK \ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \ MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS) #define HUNT_LEGACY_VF_PRIVILEGE_MASK 0 typedef uint32_t efx_piobuf_handle_t; #define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1) -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_pio_alloc( __inout efx_nic_t *enp, __out uint32_t *bufnump, __out efx_piobuf_handle_t *handlep, __out uint32_t *blknump, __out uint32_t *offsetp, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_pio_free( __inout efx_nic_t *enp, __in uint32_t bufnum, __in uint32_t blknum); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_pio_link( __inout efx_nic_t *enp, __in uint32_t vi_index, __in efx_piobuf_handle_t handle); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_nic_pio_unlink( __inout efx_nic_t *enp, __in uint32_t vi_index); /* VPD */ #if EFSYS_OPT_VPD -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_init( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_set( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_next( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void hunt_vpd_fini( __in efx_nic_t *enp); #endif 
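Given the PIO geometry defined above (HUNT_PIOBUF_NBUFS buffers of ER_DZ_TX_PIOBUF_SIZE bytes, carved into HUNT_MIN_PIO_ALLOC_SIZE blocks), the bufnum/blknum/offset values that hunt_nic_pio_alloc() reports can be pictured as simple block arithmetic. The mapping below is an illustrative assumption, not the driver's actual allocator:

#include <assert.h>
#include <stddef.h>

#define	SKETCH_PIOBUF_NBUFS	16	/* cf. HUNT_PIOBUF_NBUFS */
#define	SKETCH_PIOBUF_SIZE	2048	/* cf. ER_DZ_TX_PIOBUF_SIZE */
#define	SKETCH_MIN_ALLOC_SIZE	(SKETCH_PIOBUF_SIZE / 32)
#define	SKETCH_BLKS_PER_BUF	(SKETCH_PIOBUF_SIZE / SKETCH_MIN_ALLOC_SIZE)

/*
 * Hypothetical mapping from a linear block index to a (bufnum, blknum,
 * offset, size) tuple; the real allocator tracks free blocks per buffer.
 */
static void
sketch_pio_block_geometry(unsigned int linear_blk, unsigned int *bufnump,
    unsigned int *blknump, size_t *offsetp, size_t *sizep)
{
	assert(linear_blk < SKETCH_PIOBUF_NBUFS * SKETCH_BLKS_PER_BUF);

	*bufnump = linear_blk / SKETCH_BLKS_PER_BUF;
	*blknump = linear_blk % SKETCH_BLKS_PER_BUF;
	*offsetp = (size_t)*blknump * SKETCH_MIN_ALLOC_SIZE;
	*sizep = SKETCH_MIN_ALLOC_SIZE;
}

int
main(void)
{
	unsigned int bufnum, blknum;
	size_t offset, size;

	/* Block 33 lands in the second buffer, one block in. */
	sketch_pio_block_geometry(33, &bufnum, &blknum, &offset, &size);
	assert(bufnum == 1 && blknum == 1);
	assert(offset == SKETCH_MIN_ALLOC_SIZE && size == SKETCH_MIN_ALLOC_SIZE);

	return (0);
}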
/* EFSYS_OPT_VPD */ /* RX */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_rx_init( __in efx_nic_t *enp); #if EFSYS_OPT_RX_HDR_SPLIT -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_rx_hdr_split_enable( __in efx_nic_t *enp, __in unsigned int hdr_buf_size, __in unsigned int pld_buf_size); #endif /* EFSYS_OPT_RX_HDR_SPLIT */ #if EFSYS_OPT_RX_SCATTER -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n); #endif /* EFSYS_OPT_RX_SCALE */ extern void hunt_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added); extern void hunt_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_rx_qflush( __in efx_rxq_t *erp); extern void hunt_rx_qenable( __in efx_rxq_t *erp); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp); extern void hunt_rx_qdestroy( __in efx_rxq_t *erp); extern void hunt_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_FILTER typedef struct hunt_filter_handle_s { uint32_t hfh_lo; uint32_t hfh_hi; } hunt_filter_handle_t; typedef struct hunt_filter_entry_s { uintptr_t hfe_spec; /* pointer to filter spec plus busy bit */ hunt_filter_handle_t hfe_handle; } hunt_filter_entry_t; /* * BUSY flag indicates that an update is in progress. * AUTO_OLD flag is used to mark and sweep MAC packet filters. 
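The hfe_spec field keeps these flag bits in the low-order bits of the spec pointer, which works because the spec allocation is at least 4-byte aligned, leaving those bits otherwise zero. A small standalone illustration of that packing, using hypothetical sketch_* names and the flag values defined just below rather than the driver's accessors:

#include <assert.h>
#include <stdint.h>

#define	SKETCH_FLAG_BUSY	1U	/* update in progress */
#define	SKETCH_FLAG_AUTO_OLD	2U	/* marked for mark-and-sweep removal */
#define	SKETCH_FLAGS		3U	/* mask covering both flag bits */

struct sketch_spec {
	uint64_t dummy;	/* stand-in; wide member keeps >= 4-byte alignment on common ABIs */
};

/* Pack a (suitably aligned) spec pointer together with flag bits. */
static uintptr_t
sketch_pack(const struct sketch_spec *spec, unsigned int flags)
{
	uintptr_t word = (uintptr_t)spec;

	assert((word & SKETCH_FLAGS) == 0);	/* alignment frees the low bits */
	assert((flags & ~SKETCH_FLAGS) == 0);

	return (word | flags);
}

static const struct sketch_spec *
sketch_spec_of(uintptr_t word)
{
	return ((const struct sketch_spec *)(word & ~(uintptr_t)SKETCH_FLAGS));
}

static unsigned int
sketch_flags_of(uintptr_t word)
{
	return ((unsigned int)(word & SKETCH_FLAGS));
}

int
main(void)
{
	static struct sketch_spec spec;
	uintptr_t word;

	word = sketch_pack(&spec, SKETCH_FLAG_AUTO_OLD);
	assert(sketch_spec_of(word) == &spec);
	assert(sketch_flags_of(word) == SKETCH_FLAG_AUTO_OLD);

	return (0);
}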
*/ #define EFX_HUNT_FILTER_FLAG_BUSY 1U #define EFX_HUNT_FILTER_FLAG_AUTO_OLD 2U #define EFX_HUNT_FILTER_FLAGS 3U #define EFX_HUNT_FILTER_TBL_ROWS 8192 /* Allow for the broadcast address to be added to the multicast list */ #define EFX_HUNT_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1) typedef struct hunt_filter_table_s { hunt_filter_entry_t hft_entry[EFX_HUNT_FILTER_TBL_ROWS]; efx_rxq_t * hft_default_rxq; boolean_t hft_using_rss; uint32_t hft_unicst_filter_index; boolean_t hft_unicst_filter_set; uint32_t hft_mulcst_filter_indexes[ EFX_HUNT_FILTER_MULTICAST_FILTERS_MAX]; uint32_t hft_mulcst_filter_count; } hunt_filter_table_t; - __checkReturn int + __checkReturn efx_rc_t hunt_filter_init( __in efx_nic_t *enp); void hunt_filter_fini( __in efx_nic_t *enp); - __checkReturn int + __checkReturn efx_rc_t hunt_filter_restore( __in efx_nic_t *enp); - __checkReturn int + __checkReturn efx_rc_t hunt_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace); - __checkReturn int + __checkReturn efx_rc_t hunt_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length); -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in int count); extern void hunt_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss); extern void hunt_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void hunt_filter_default_rxq_clear( __in efx_nic_t *enp); #endif /* EFSYS_OPT_FILTER */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_pktfilter_set( __in efx_nic_t *enp, __in boolean_t unicst, __in boolean_t brdcst); #if EFSYS_OPT_MCAST_FILTER_LIST -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_pktfilter_mcast_set( __in efx_nic_t *enp, __in uint8_t const *addrs, __in int count); #endif /* EFSYS_OPT_MCAST_FILTER_LIST */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_pktfilter_mcast_all( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_get_function_info( __in efx_nic_t *enp, __out uint32_t *pfp, __out_opt uint32_t *vfp); -extern __checkReturn int +extern __checkReturn efx_rc_t efx_mcdi_privilege_mask( __in efx_nic_t *enp, __in uint32_t pf, __in uint32_t vf, __out uint32_t *maskp); #ifdef __cplusplus } #endif #endif /* _SYS_HUNT_IMPL_H */ Index: stable/10/sys/dev/sfxge/common/hunt_intr.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_intr.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_intr.c (revision 293927) @@ -1,155 +1,155 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON - __checkReturn int + __checkReturn efx_rc_t hunt_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp) { _NOTE(ARGUNUSED(enp, type, esmp)) return (0); } void hunt_intr_enable( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } void hunt_intr_disable( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } void hunt_intr_disable_unlocked( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_trigger_interrupt( __in efx_nic_t *enp, __in unsigned int level) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN, MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); if (level >= enp->en_nic_cfg.enc_intr_limit) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN; MCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_intr_trigger( __in efx_nic_t *enp, __in unsigned int level) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - int rc; + efx_rc_t rc; if (encp->enc_bug41750_workaround) { /* bug 41750: Test interrupts don't work on Greenport */ rc = ENOTSUP; goto fail1; } if ((rc = efx_mcdi_trigger_interrupt(enp, level)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_intr_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_mac.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_mac.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_mac.c (revision 293927) @@ -1,696 +1,696 @@ /*- * Copyright (c) 2012-2015 Solarflare 
Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON - __checkReturn int + __checkReturn efx_rc_t hunt_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep) { /* * TBD: Consider a common Siena/Huntington function. The code is * essentially identical. */ efx_port_t *epp = &(enp->en_port); hunt_link_state_t hls; - int rc; + efx_rc_t rc; if ((rc = hunt_phy_get_link(enp, &hls)) != 0) goto fail1; epp->ep_adv_cap_mask = hls.hls_adv_cap_mask; epp->ep_fcntl = hls.hls_fcntl; *link_modep = hls.hls_link_mode; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); *link_modep = EFX_LINK_UNKNOWN; return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp) { /* * TBD: Consider a common Siena/Huntington function. The code is * essentially identical. */ hunt_link_state_t hls; - int rc; + efx_rc_t rc; /* * Because Huntington doesn't *require* polling, we can't rely on * hunt_mac_poll() being executed to populate epp->ep_mac_up. */ if ((rc = hunt_phy_get_link(enp, &hls)) != 0) goto fail1; *mac_upp = hls.hls_mac_up; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Huntington uses MC_CMD_VADAPTOR_SET_MAC to set the * MAC address; the address field in MC_CMD_SET_MAC has no * effect. * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and * the port to have no filters or queues active. 
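hunt_mac_addr_set() below tries MC_CMD_VADAPTOR_SET_MAC first and falls back to the MC_CMD_SET_MAC reconfigure path only when the firmware reports ENOTSUP. Reduced to a standalone sketch, with stand-in functions in place of the MCDI calls, the control flow is:

#include <errno.h>

typedef int efx_rc_t;			/* stand-in; see efx.h for the real type */

/* Stand-in for efx_mcdi_vadapter_set_mac(): pretend the firmware is old. */
static efx_rc_t
sketch_vadapter_set_mac(void)
{
	return (ENOTSUP);
}

/* Stand-in for the MC_CMD_SET_MAC based reconfigure path. */
static efx_rc_t
sketch_mac_reconfigure(void)
{
	return (0);
}

static efx_rc_t
sketch_mac_addr_set(void)
{
	efx_rc_t rc;

	if ((rc = sketch_vadapter_set_mac()) != 0) {
		if (rc != ENOTSUP)
			return (rc);	/* real failure: report it */

		/* Fallback for firmware without vadapter support. */
		if ((rc = sketch_mac_reconfigure()) != 0)
			return (rc);
	}

	return (0);
}

int
main(void)
{
	return (sketch_mac_addr_set());
}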
*/ -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_vadapter_set_mac( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN, MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN; MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, enp->en_vport_id); EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR), epp->ep_mac_addr); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_mac_addr_set( __in efx_nic_t *enp) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) { if (rc != ENOTSUP) goto fail1; /* Fallback for older firmware without Vadapter support */ if ((rc = hunt_mac_reconfigure(enp)) != 0) goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -__checkReturn int +__checkReturn efx_rc_t hunt_mac_reconfigure( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN, MC_CMD_SET_MAC_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_MAC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_MAC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN; MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu); MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0); EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR), epp->ep_mac_addr); /* * Note: The Huntington MAC does not support REJECT_BRDCST. * The REJECT_UNCST flag will also prevent multicast traffic * from reaching the filters. As Huntington filters drop any * traffic that does not match a filter it is ok to leave the * MAC running in promiscuous mode. See bug41141. */ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT, SET_MAC_IN_REJECT_UNCST, 0, SET_MAC_IN_REJECT_BRDCST, 0); /* * Flow control, whether it is auto-negotiated or not, * is set via the PHY advertised capabilities. When set to * automatic the MAC will use the PHY settings to determine * the flow control settings. */ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO); /* Do not include the Ethernet frame checksum in RX packets */ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS, SET_MAC_IN_FLAG_INCLUDE_FCS, 0); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { /* * Unprivileged functions cannot control link state, * but still need to configure filters. */ if (req.emr_rc != EACCES) { rc = req.emr_rc; goto fail1; } } /* * Apply the filters for the MAC configuration. * If the NIC isn't ready to accept filters this may * return success without setting anything. 
*/ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr, epp->ep_all_unicst, epp->ep_mulcst, epp->ep_all_mulcst, epp->ep_brdcst, epp->ep_mulcst_addr_list, epp->ep_mulcst_addr_count); return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_mac_multicast_list_set( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mac_ops_t *emop = epp->ep_emop; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); /* FIXME: Insert filters for multicast list */ if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { efx_port_t *epp = &(enp->en_port); efx_rxq_t *old_rxq; boolean_t old_using_rss; - int rc; + efx_rc_t rc; hunt_filter_get_default_rxq(enp, &old_rxq, &old_using_rss); hunt_filter_default_rxq_set(enp, erp, using_rss); rc = efx_filter_reconfigure(enp, epp->ep_mac_addr, epp->ep_all_unicst, epp->ep_mulcst, epp->ep_all_mulcst, epp->ep_brdcst, epp->ep_mulcst_addr_list, epp->ep_mulcst_addr_count); if (rc != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); hunt_filter_default_rxq_set(enp, old_rxq, old_using_rss); return (rc); } void hunt_mac_filter_default_rxq_clear( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); hunt_filter_default_rxq_clear(enp); efx_filter_reconfigure(enp, epp->ep_mac_addr, epp->ep_all_unicst, epp->ep_mulcst, epp->ep_all_mulcst, epp->ep_brdcst, epp->ep_mulcst_addr_list, epp->ep_mulcst_addr_count); } #if EFSYS_OPT_LOOPBACK - __checkReturn int + __checkReturn efx_rc_t hunt_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type) { /* * TBD: Consider a common Siena/Huntington function. The code is * essentially identical. 
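Both hunt_mac_filter_default_rxq_set() above and the loopback path here follow the same rollback pattern: capture the current settings, apply the new ones, attempt the reconfigure, and restore the saved state if it fails. A standalone sketch of that pattern with hypothetical sketch_* names:

#include <errno.h>

typedef int efx_rc_t;			/* stand-in for the efx.h typedef */

struct sketch_port {
	int	loopback_type;
	int	loopback_link_mode;
};

/* Stand-in for the reconfigure call that may reject the new settings. */
static efx_rc_t
sketch_reconfigure(const struct sketch_port *port)
{
	return (port->loopback_type < 0 ? EINVAL : 0);
}

static efx_rc_t
sketch_loopback_set(struct sketch_port *port, int type, int link_mode)
{
	int old_type = port->loopback_type;
	int old_link_mode = port->loopback_link_mode;
	efx_rc_t rc;

	port->loopback_type = type;
	port->loopback_link_mode = link_mode;

	if ((rc = sketch_reconfigure(port)) != 0)
		goto fail1;

	return (0);

fail1:
	/* Roll back to the previously applied settings. */
	port->loopback_type = old_type;
	port->loopback_link_mode = old_link_mode;

	return (rc);
}

int
main(void)
{
	struct sketch_port port = { 0, 0 };

	return (sketch_loopback_set(&port, 1, 1));
}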
*/ efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; efx_loopback_type_t old_loopback_type; efx_link_mode_t old_loopback_link_mode; - int rc; + efx_rc_t rc; /* The PHY object handles this on Huntington */ old_loopback_type = epp->ep_loopback_type; old_loopback_link_mode = epp->ep_loopback_link_mode; epp->ep_loopback_type = loopback_type; epp->ep_loopback_link_mode = link_mode; if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); epp->ep_loopback_type = old_loopback_type; epp->ep_loopback_link_mode = old_loopback_link_mode; return (rc); } #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS #define HUNT_MAC_STAT_READ(_esmp, _field, _eqp) \ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp) - __checkReturn int + __checkReturn efx_rc_t hunt_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp) { efx_qword_t value; efx_qword_t generation_start; efx_qword_t generation_end; _NOTE(ARGUNUSED(enp)) /* Read END first so we don't race with the MC */ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END, &generation_end); EFSYS_MEM_READ_BARRIER(); /* TX */ HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value); EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value); 
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value); /* RX */ HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value); 
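The generation words bracketing this statistics buffer give a lock-free consistency check: GENERATION_END is read before the counters and GENERATION_START afterwards, and a mismatch means the MC rewrote the buffer mid-read, so the caller gets EAGAIN and retries. A standalone sketch of that retry scheme, with plain memory reads standing in for the EFSYS_MEM accessors and the memory barriers of the real code elided:

#include <errno.h>
#include <stdint.h>

#define	SKETCH_NSTATS	4

struct sketch_stats_buf {
	uint32_t	generation_end;		/* written last by the producer */
	uint64_t	counters[SKETCH_NSTATS];
	uint32_t	generation_start;	/* written first by the producer */
};

/*
 * Copy out a consistent snapshot, or return EAGAIN if the producer
 * updated the buffer while we were reading it.
 */
static int
sketch_stats_snapshot(const volatile struct sketch_stats_buf *buf,
    uint64_t *out)
{
	uint32_t gen_end, gen_start;
	unsigned int i;

	gen_end = buf->generation_end;	/* read END first to detect races */

	for (i = 0; i < SKETCH_NSTATS; i++)
		out[i] = buf->counters[i];

	gen_start = buf->generation_start;

	if (gen_start != gen_end)
		return (EAGAIN);	/* torn read: caller should retry */

	return (0);
}

int
main(void)
{
	static struct sketch_stats_buf buf;	/* stands in for the DMA buffer */
	uint64_t snap[SKETCH_NSTATS];

	return (sketch_stats_snapshot(&buf, snap));
}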
HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]), &(value.eq_dword[1])); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]), &(value.eq_dword[1])); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]), &(value.eq_dword[1])); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]), &(value.eq_dword[1])); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value); /* Packet memory (EF10 only) */ HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_BB_OVERFLOW]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_BB_OVERFLOW]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_VFIFO_FULL, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_VFIFO_FULL]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_VFIFO_FULL, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_VFIFO_FULL]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_QBB, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_QBB]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_QBB, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_QBB]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_MAPPING, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_MAPPING]), &value); /* RX datapath */ HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_Q_DISABLED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_Q_DISABLED_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_DI_DROPPED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_DI_DROPPED_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_STREAMING_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_STREAMING_PKTS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_FETCH]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_WAIT]), &value); /* VADAPTER RX */ HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_BYTES]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS, &value); 
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_PACKETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_BYTES]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_OVERFLOW, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_OVERFLOW]), &value); /* VADAPTER TX */ HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_BYTES]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_PACKETS]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_BYTES]), &value); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_OVERFLOW, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value); EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); EFSYS_MEM_READ_BARRIER(); HUNT_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, &generation_start); /* Check that we didn't read the stats in the middle of a DMA */ /* Not a good enough check ? */ if (memcmp(&generation_start, &generation_end, sizeof (generation_start))) return (EAGAIN); if (generationp) *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); return (0); } #endif /* EFSYS_OPT_MAC_STATS */ #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_mcdi.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_mcdi.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_mcdi.c (revision 293927) @@ -1,465 +1,465 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON #if EFSYS_OPT_MCDI #ifndef WITH_MCDI_V2 #error "WITH_MCDI_V2 required for Huntington MCDIv2 commands." #endif typedef enum efx_mcdi_header_type_e { EFX_MCDI_HEADER_TYPE_V1, /* MCDIv0 (BootROM), MCDIv1 commands */ EFX_MCDI_HEADER_TYPE_V2, /* MCDIv2 commands */ } efx_mcdi_header_type_t; /* * Return the header format to use for sending an MCDI request. * * An MCDIv1 (Siena compatible) command should use MCDIv2 encapsulation if the * request input buffer or response output buffer are too large for the MCDIv1 * format. An MCDIv2 command must always be sent using MCDIv2 encapsulation. */ #define EFX_MCDI_HEADER_TYPE(_cmd, _length) \ ((((_cmd) & ~EFX_MASK32(MCDI_HEADER_CODE)) || \ ((_length) & ~EFX_MASK32(MCDI_HEADER_DATALEN))) ? \ EFX_MCDI_HEADER_TYPE_V2 : EFX_MCDI_HEADER_TYPE_V1) /* * MCDI Header NOT_EPOCH flag * ========================== * A new epoch begins at initial startup or after an MC reboot, and defines when * the MC should reject stale MCDI requests. * * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1. * * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0. */ - __checkReturn int + __checkReturn efx_rc_t hunt_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *emtp) { efsys_mem_t *esmp = emtp->emt_dma_mem; efx_dword_t dword; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA); /* A host DMA buffer is required for Huntington MCDI */ if (esmp == NULL) { rc = EINVAL; goto fail1; } /* * Ensure that the MC doorbell is in a known state before issuing MCDI * commands. The recovery algorithm requires that the MC command buffer * must be 256 byte aligned. See bug24769. 
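EFX_MCDI_HEADER_TYPE() above boils down to a fits-in-the-v1-fields test: if either the command code or the payload length overflows the v1 header's CODE or DATALEN field, the request must be wrapped in a v2 header. A standalone restatement of that test follows; the field masks are illustrative placeholders, not the real MCDI register definitions used by EFX_MASK32():

#include <stdint.h>

/*
 * Illustrative field masks only; the real values come from the MCDI
 * register definitions via EFX_MASK32(MCDI_HEADER_CODE) and
 * EFX_MASK32(MCDI_HEADER_DATALEN).
 */
#define	SKETCH_V1_CODE_MASK	0x7fu
#define	SKETCH_V1_DATALEN_MASK	0xffu

enum sketch_hdr_type {
	SKETCH_HEADER_TYPE_V1,
	SKETCH_HEADER_TYPE_V2,
};

static enum sketch_hdr_type
sketch_header_type(uint32_t cmd, uint32_t length)
{
	/* Anything that does not fit the v1 fields needs v2 encapsulation. */
	if ((cmd & ~SKETCH_V1_CODE_MASK) != 0 ||
	    (length & ~SKETCH_V1_DATALEN_MASK) != 0)
		return (SKETCH_HEADER_TYPE_V2);

	return (SKETCH_HEADER_TYPE_V1);
}

int
main(void)
{
	/* A short command with a small payload can use the v1 header. */
	return (sketch_header_type(0x09, 64) != SKETCH_HEADER_TYPE_V1);
}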
*/ if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) { rc = EINVAL; goto fail2; } EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1); EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE); /* Save initial MC reboot status */ (void) hunt_mcdi_poll_reboot(enp); /* Start a new epoch (allow fresh MCDI requests to succeed) */ efx_mcdi_new_epoch(enp); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_mcdi_fini( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); emip->emi_new_epoch = B_FALSE; } void hunt_mcdi_request_copyin( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in unsigned int seq, __in boolean_t ev_cpl, __in boolean_t new_epoch) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efsys_mem_t *esmp = emtp->emt_dma_mem; efx_mcdi_header_type_t hdr_type; efx_dword_t dword; unsigned int xflags; unsigned int pos; size_t offset; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); xflags = 0; if (ev_cpl) xflags |= MCDI_HEADER_XFLAGS_EVREQ; offset = 0; hdr_type = EFX_MCDI_HEADER_TYPE(emrp->emr_cmd, MAX(emrp->emr_in_length, emrp->emr_out_length)); if (hdr_type == EFX_MCDI_HEADER_TYPE_V2) { /* Construct MCDI v2 header */ EFX_POPULATE_DWORD_8(dword, MCDI_HEADER_CODE, MC_CMD_V2_EXTN, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_DATALEN, 0, MCDI_HEADER_SEQ, seq, MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1, MCDI_HEADER_ERROR, 0, MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_XFLAGS, xflags); EFSYS_MEM_WRITED(esmp, offset, &dword); offset += sizeof (dword); EFX_POPULATE_DWORD_2(dword, MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd, MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length); EFSYS_MEM_WRITED(esmp, offset, &dword); offset += sizeof (dword); } else { /* Construct MCDI v1 header */ EFX_POPULATE_DWORD_8(dword, MCDI_HEADER_CODE, emrp->emr_cmd, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_DATALEN, emrp->emr_in_length, MCDI_HEADER_SEQ, seq, MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1, MCDI_HEADER_ERROR, 0, MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_XFLAGS, xflags); EFSYS_MEM_WRITED(esmp, offset, &dword); offset += sizeof (dword); } /* Construct the payload */ for (pos = 0; pos < emrp->emr_in_length; pos += sizeof (efx_dword_t)) { memcpy(&dword, MCDI_IN(*emrp, efx_dword_t, pos), MIN(sizeof (dword), emrp->emr_in_length - pos)); EFSYS_MEM_WRITED(esmp, offset + pos, &dword); } /* Ring the doorbell to post the command DMA address to the MC */ EFSYS_ASSERT((EFSYS_MEM_ADDR(esmp) & 0xFF) == 0); /* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */ EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, offset + emrp->emr_in_length); EFSYS_PIO_WRITE_BARRIER(); EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, EFSYS_MEM_ADDR(esmp) >> 32); EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE); EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, EFSYS_MEM_ADDR(esmp) & 0xffffffff); EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE); } void hunt_mcdi_request_copyout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efsys_mem_t *esmp = emtp->emt_dma_mem; unsigned int pos; unsigned int offset; efx_dword_t hdr; efx_dword_t hdr2; efx_dword_t data; size_t bytes; if (emrp->emr_out_buf == NULL) return; /* Read the command header to detect MCDI response format */ EFSYS_MEM_READD(esmp, 0, &hdr); if (EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) { offset = 2 * sizeof (efx_dword_t); /* * Read the actual payload length. 
The length given in the event * is only correct for responses with the V1 format. */ EFSYS_MEM_READD(esmp, sizeof (efx_dword_t), &hdr2); emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr2, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } else { offset = sizeof (efx_dword_t); } /* Copy payload out into caller supplied buffer */ bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length); for (pos = 0; pos < bytes; pos += sizeof (efx_dword_t)) { EFSYS_MEM_READD(esmp, offset + pos, &data); memcpy(MCDI_OUT(*emrp, efx_dword_t, pos), &data, MIN(sizeof (data), bytes - pos)); } } __checkReturn boolean_t hunt_mcdi_request_poll( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efsys_mem_t *esmp = emtp->emt_dma_mem; efx_mcdi_req_t *emrp; efx_dword_t dword; unsigned int seq; unsigned int cmd; unsigned int length; size_t offset; int state; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); /* Serialise against post-watchdog efx_mcdi_ev* */ EFSYS_LOCK(enp->en_eslp, state); EFSYS_ASSERT(emip->emi_pending_req != NULL); EFSYS_ASSERT(!emip->emi_ev_cpl); emrp = emip->emi_pending_req; offset = 0; /* Read the command header */ EFSYS_MEM_READD(esmp, offset, &dword); offset += sizeof (efx_dword_t); if (EFX_DWORD_FIELD(dword, MCDI_HEADER_RESPONSE) == 0) { EFSYS_UNLOCK(enp->en_eslp, state); return (B_FALSE); } if (EFX_DWORD_FIELD(dword, MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) { efx_dword_t dword2; EFSYS_MEM_READD(esmp, offset, &dword2); offset += sizeof (efx_dword_t); cmd = EFX_DWORD_FIELD(dword2, MC_CMD_V2_EXTN_IN_EXTENDED_CMD); length = EFX_DWORD_FIELD(dword2, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } else { cmd = EFX_DWORD_FIELD(dword, MCDI_HEADER_CODE); length = EFX_DWORD_FIELD(dword, MCDI_HEADER_DATALEN); } /* Request complete */ emip->emi_pending_req = NULL; seq = (emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ); /* Check for synchronous reboot */ if (EFX_DWORD_FIELD(dword, MCDI_HEADER_ERROR) != 0 && length == 0) { /* The MC has rebooted since the request was sent. */ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); hunt_mcdi_poll_reboot(enp); EFSYS_UNLOCK(enp->en_eslp, state); rc = EIO; goto fail1; } /* Ensure stale MCDI requests fail after an MC reboot. 
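The poll routine here treats a response with ERROR set and a zero payload length as the signature of a synchronous MC reboot, and a sequence-number mismatch as a response belonging to some other request; both collapse to EIO. A compact sketch of those header checks, using a stand-in header structure instead of EFX_DWORD_FIELD() and omitting the error-code decode done for non-empty error responses:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct sketch_mcdi_hdr {
	bool		response;	/* response bit set by the MC */
	bool		error;		/* ERROR flag */
	unsigned int	seq;		/* sequence number */
	uint32_t	datalen;	/* payload length */
};

/*
 * Classify a polled response header: 0 on success, EAGAIN if no response
 * is present yet, EIO for a rebooted MC or a mismatched sequence number.
 */
static int
sketch_classify_response(const struct sketch_mcdi_hdr *hdr,
    unsigned int expected_seq)
{
	if (!hdr->response)
		return (EAGAIN);	/* not completed yet; poll again */

	if (hdr->error && hdr->datalen == 0)
		return (EIO);		/* MC rebooted since the request */

	if (hdr->seq != expected_seq)
		return (EIO);		/* response is for a different request */

	return (0);
}

int
main(void)
{
	struct sketch_mcdi_hdr hdr = { true, false, 3, 16 };

	return (sketch_classify_response(&hdr, 3));
}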
*/ emip->emi_new_epoch = B_FALSE; EFSYS_UNLOCK(enp->en_eslp, state); /* Check that the returned data is consistent */ if (cmd != emrp->emr_cmd || EFX_DWORD_FIELD(dword, MCDI_HEADER_SEQ) != seq) { /* Response is for a different request */ rc = EIO; goto fail2; } if (EFX_DWORD_FIELD(dword, MCDI_HEADER_ERROR)) { efx_dword_t errdword; int errcode; int argnum; /* Read error code (and arg num for MCDI v2 commands) */ EFSYS_MEM_READD(esmp, offset + MC_CMD_ERR_CODE_OFST, &errdword); errcode = EFX_DWORD_FIELD(errdword, EFX_DWORD_0); EFSYS_MEM_READD(esmp, offset + MC_CMD_ERR_ARG_OFST, &errdword); argnum = EFX_DWORD_FIELD(errdword, EFX_DWORD_0); rc = efx_mcdi_request_errcode(errcode); if (!emrp->emr_quiet) { EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd, int, errcode, int, argnum); } goto fail3; } else { emrp->emr_out_length_used = length; emrp->emr_rc = 0; hunt_mcdi_request_copyout(enp, emrp); } goto out; fail3: if (!emrp->emr_quiet) EFSYS_PROBE(fail3); fail2: if (!emrp->emr_quiet) EFSYS_PROBE(fail2); fail1: if (!emrp->emr_quiet) - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Fill out error state */ emrp->emr_rc = rc; emrp->emr_out_length_used = 0; /* Reboot/Assertion */ if (rc == EIO || rc == EINTR) efx_mcdi_raise_exception(enp, emrp, rc); out: return (B_TRUE); } - int + efx_rc_t hunt_mcdi_poll_reboot( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_dword_t dword; uint32_t old_status; uint32_t new_status; - int rc; + efx_rc_t rc; old_status = emip->emi_mc_reboot_status; /* Update MC reboot status word */ EFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE); new_status = dword.ed_u32[0]; /* MC has rebooted if the value has changed */ if (new_status != old_status) { emip->emi_mc_reboot_status = new_status; /* * FIXME: Ignore detected MC REBOOT for now. * * The Siena support for checking for MC reboot from status * flags is broken - see comments in siena_mcdi_poll_reboot(). * As the generic MCDI code is shared the Huntington reboot * detection suffers similar problems. * * Do not report an error when the boot status changes until * this can be handled by common code drivers (and reworked to * support Siena too). 
*/ if (B_FALSE) { rc = EIO; goto fail1; } } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_mcdi_fw_update_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); /* use privilege mask state at MCDI attach */ *supportedp = (encp->enc_privilege_mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN) == MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN; return (0); } - __checkReturn int + __checkReturn efx_rc_t hunt_mcdi_macaddr_change_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); /* use privilege mask state at MCDI attach */ *supportedp = (encp->enc_privilege_mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING) == MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING; return (0); } #endif /* EFSYS_OPT_MCDI */ #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_nic.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_nic.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_nic.c (revision 293927) @@ -1,1772 +1,1772 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #include "mcdi_mon.h" #if EFSYS_OPT_HUNTINGTON #include "ef10_tlv_layout.h" -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_get_port_assignment( __in efx_nic_t *enp, __out uint32_t *portp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_get_port_modes( __in efx_nic_t *enp, __out uint32_t *modesp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN, MC_CMD_GET_PORT_MODES_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PORT_MODES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } /* Accept pre-Medford size (8 bytes - no CurrentMode field) */ if (req.emr_out_length_used < MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) { rc = EMSGSIZE; goto fail2; } *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_vadaptor_alloc( __in efx_nic_t *enp, __in uint32_t port_id) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN, MC_CMD_VADAPTOR_ALLOC_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VADAPTOR_ALLOC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN; MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_vadaptor_free( __in efx_nic_t *enp, __in uint32_t port_id) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN, MC_CMD_VADAPTOR_FREE_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VADAPTOR_FREE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN; MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return 
(0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_get_mac_address_pf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) { rc = ENOENT; goto fail3; } if (mac_addrp != NULL) { uint8_t *addrp; addrp = MCDI_OUT2(req, uint8_t, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE); EFX_MAC_ADDR_COPY(mac_addrp, addrp); } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_get_mac_address_vf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX; MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, EVB_PORT_ID_ASSIGNED); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } if (MCDI_OUT_DWORD(req, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) { rc = ENOENT; goto fail3; } if (mac_addrp != NULL) { uint8_t *addrp; addrp = MCDI_OUT2(req, uint8_t, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR); EFX_MAC_ADDR_COPY(mac_addrp, addrp); } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_get_clock( __in efx_nic_t *enp, __out uint32_t *sys_freqp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN, MC_CMD_GET_CLOCK_OUT_LEN)]; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_CLOCK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ); if (*sys_freqp == 0) { rc = EINVAL; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static 
__checkReturn efx_rc_t efx_mcdi_get_vector_cfg( __in efx_nic_t *enp, __out_opt uint32_t *vec_basep, __out_opt uint32_t *pf_nvecp, __out_opt uint32_t *vf_nvecp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN, MC_CMD_GET_VECTOR_CFG_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_VECTOR_CFG; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (vec_basep != NULL) *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE); if (pf_nvecp != NULL) *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF); if (vf_nvecp != NULL) *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_get_capabilities( __in efx_nic_t *enp, __out efx_dword_t *flagsp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN, MC_CMD_GET_CAPABILITIES_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_CAPABILITIES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_CAPABILITIES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *flagsp = *MCDI_OUT2(req, efx_dword_t, GET_CAPABILITIES_OUT_FLAGS1); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_alloc_vis( __in efx_nic_t *enp, __in uint32_t min_vi_count, __in uint32_t max_vi_count, __out_opt uint32_t *vi_basep, __out uint32_t *vi_countp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN, MC_CMD_ALLOC_VIS_OUT_LEN)]; - int rc; + efx_rc_t rc; if (vi_countp == NULL) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_ALLOC_VIS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_ALLOC_VIS_OUT_LEN; MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count); MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) { rc = EMSGSIZE; goto fail3; } if (vi_basep != NULL) *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE); if (vi_countp != NULL) *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_free_vis( __in efx_nic_t *enp) { efx_mcdi_req_t req; - int rc; + efx_rc_t rc; EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0); EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0); req.emr_cmd = MC_CMD_FREE_VIS; req.emr_in_buf = NULL; req.emr_in_length = 0; req.emr_out_buf = NULL; 
req.emr_out_length = 0; efx_mcdi_execute_quiet(enp, &req); /* Ignore ELREADY (no allocated VIs, so nothing to free) */ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_alloc_piobuf( __in efx_nic_t *enp, __out efx_piobuf_handle_t *handlep) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN, MC_CMD_ALLOC_PIOBUF_OUT_LEN)]; - int rc; + efx_rc_t rc; if (handlep == NULL) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_ALLOC_PIOBUF; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN; efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { rc = EMSGSIZE; goto fail3; } *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_free_piobuf( __in efx_nic_t *enp, __in efx_piobuf_handle_t handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN, MC_CMD_FREE_PIOBUF_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FREE_PIOBUF; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN; MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_link_piobuf( __in efx_nic_t *enp, __in uint32_t vi_index, __in efx_piobuf_handle_t handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN, MC_CMD_LINK_PIOBUF_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LINK_PIOBUF; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN; MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle); MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_unlink_piobuf( __in efx_nic_t *enp, __in uint32_t vi_index) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN, MC_CMD_UNLINK_PIOBUF_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_UNLINK_PIOBUF; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN; MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static 
void hunt_nic_alloc_piobufs( __in efx_nic_t *enp, __in uint32_t max_piobuf_count) { efx_piobuf_handle_t *handlep; unsigned int i; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(max_piobuf_count, <=, EFX_ARRAY_SIZE(enp->en_u.hunt.enu_piobuf_handle)); enp->en_u.hunt.enu_piobuf_count = 0; for (i = 0; i < max_piobuf_count; i++) { handlep = &enp->en_u.hunt.enu_piobuf_handle[i]; if ((rc = efx_mcdi_alloc_piobuf(enp, handlep)) != 0) goto fail1; enp->en_u.hunt.enu_pio_alloc_map[i] = 0; enp->en_u.hunt.enu_piobuf_count++; } return; fail1: for (i = 0; i < enp->en_u.hunt.enu_piobuf_count; i++) { handlep = &enp->en_u.hunt.enu_piobuf_handle[i]; efx_mcdi_free_piobuf(enp, *handlep); *handlep = EFX_PIOBUF_HANDLE_INVALID; } enp->en_u.hunt.enu_piobuf_count = 0; } static void hunt_nic_free_piobufs( __in efx_nic_t *enp) { efx_piobuf_handle_t *handlep; unsigned int i; for (i = 0; i < enp->en_u.hunt.enu_piobuf_count; i++) { handlep = &enp->en_u.hunt.enu_piobuf_handle[i]; efx_mcdi_free_piobuf(enp, *handlep); *handlep = EFX_PIOBUF_HANDLE_INVALID; } enp->en_u.hunt.enu_piobuf_count = 0; } /* Sub-allocate a block from a piobuf */ - __checkReturn int + __checkReturn efx_rc_t hunt_nic_pio_alloc( __inout efx_nic_t *enp, __out uint32_t *bufnump, __out efx_piobuf_handle_t *handlep, __out uint32_t *blknump, __out uint32_t *offsetp, __out size_t *sizep) { efx_drv_cfg_t *edcp = &enp->en_drv_cfg; uint32_t blk_per_buf; uint32_t buf, blk; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); EFSYS_ASSERT(bufnump); EFSYS_ASSERT(handlep); EFSYS_ASSERT(blknump); EFSYS_ASSERT(offsetp); EFSYS_ASSERT(sizep); if ((edcp->edc_pio_alloc_size == 0) || (enp->en_u.hunt.enu_piobuf_count == 0)) { rc = ENOMEM; goto fail1; } blk_per_buf = HUNT_PIOBUF_SIZE / edcp->edc_pio_alloc_size; for (buf = 0; buf < enp->en_u.hunt.enu_piobuf_count; buf++) { uint32_t *map = &enp->en_u.hunt.enu_pio_alloc_map[buf]; if (~(*map) == 0) continue; EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map))); for (blk = 0; blk < blk_per_buf; blk++) { if ((*map & (1u << blk)) == 0) { *map |= (1u << blk); goto done; } } } rc = ENOMEM; goto fail2; done: *handlep = enp->en_u.hunt.enu_piobuf_handle[buf]; *bufnump = buf; *blknump = blk; *sizep = edcp->edc_pio_alloc_size; *offsetp = blk * (*sizep); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Free a piobuf sub-allocated block */ - __checkReturn int + __checkReturn efx_rc_t hunt_nic_pio_free( __inout efx_nic_t *enp, __in uint32_t bufnum, __in uint32_t blknum) { uint32_t *map; - int rc; + efx_rc_t rc; if ((bufnum >= enp->en_u.hunt.enu_piobuf_count) || (blknum >= (8 * sizeof (*map)))) { rc = EINVAL; goto fail1; } map = &enp->en_u.hunt.enu_pio_alloc_map[bufnum]; if ((*map & (1u << blknum)) == 0) { rc = ENOENT; goto fail2; } *map &= ~(1u << blknum); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nic_pio_link( __inout efx_nic_t *enp, __in uint32_t vi_index, __in efx_piobuf_handle_t handle) { return (efx_mcdi_link_piobuf(enp, vi_index, handle)); } - __checkReturn int + __checkReturn efx_rc_t hunt_nic_pio_unlink( __inout efx_nic_t *enp, __in uint32_t vi_index) { return (efx_mcdi_unlink_piobuf(enp, vi_index)); } -static __checkReturn int +static __checkReturn efx_rc_t hunt_get_datapath_caps( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_dword_t datapath_capabilities; - int rc; + 
efx_rc_t rc; if ((rc = efx_mcdi_get_capabilities(enp, &datapath_capabilities)) != 0) goto fail1; /* * Huntington RXDP firmware inserts a 0 or 14 byte prefix. * We only support the 14 byte prefix here. */ if (MCDI_CMD_DWORD_FIELD(&datapath_capabilities, GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14) != 1) { rc = ENOTSUP; goto fail2; } encp->enc_rx_prefix_size = 14; /* Check if the firmware supports TSO */ if (MCDI_CMD_DWORD_FIELD(&datapath_capabilities, GET_CAPABILITIES_OUT_TX_TSO) == 1) encp->enc_fw_assisted_tso_enabled = B_TRUE; else encp->enc_fw_assisted_tso_enabled = B_FALSE; /* Check if the firmware has vadapter/vport/vswitch support */ if (MCDI_CMD_DWORD_FIELD(&datapath_capabilities, GET_CAPABILITIES_OUT_EVB) == 1) encp->enc_datapath_cap_evb = B_TRUE; else encp->enc_datapath_cap_evb = B_FALSE; /* Check if the firmware supports VLAN insertion */ if (MCDI_CMD_DWORD_FIELD(&datapath_capabilities, GET_CAPABILITIES_OUT_TX_VLAN_INSERTION) == 1) encp->enc_hw_tx_insert_vlan_enabled = B_TRUE; else encp->enc_hw_tx_insert_vlan_enabled = B_FALSE; /* Check if the firmware supports RX event batching */ if (MCDI_CMD_DWORD_FIELD(&datapath_capabilities, GET_CAPABILITIES_OUT_RX_BATCHING) == 1) { encp->enc_rx_batching_enabled = B_TRUE; encp->enc_rx_batch_max = 16; } else { encp->enc_rx_batching_enabled = B_FALSE; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * The external port mapping is a one-based numbering of the external * connectors on the board. It does not distinguish off-board separated * outputs such as multi-headed cables. * The number of ports that map to each external port connector * on the board is determined by the chip family and the port modes to * which the NIC can be configured. The mapping table lists modes with * port numbering requirements in increasing order. */ static struct { efx_family_t family; uint32_t modes_mask; uint32_t stride; } __hunt_external_port_mappings[] = { /* Supported modes requiring 1 output per port */ { EFX_FAMILY_HUNTINGTON, (1 << TLV_PORT_MODE_10G) | (1 << TLV_PORT_MODE_10G_10G) | (1 << TLV_PORT_MODE_10G_10G_10G_10G), 1 }, /* Supported modes requiring 2 outputs per port */ { EFX_FAMILY_HUNTINGTON, (1 << TLV_PORT_MODE_40G) | (1 << TLV_PORT_MODE_40G_40G) | (1 << TLV_PORT_MODE_40G_10G_10G) | (1 << TLV_PORT_MODE_10G_10G_40G), 2 } /* * NOTE: Medford modes will require 4 outputs per port: * TLV_PORT_MODE_10G_10G_10G_10G_Q * TLV_PORT_MODE_10G_10G_10G_10G_Q2 * The Q2 mode routes outputs to external port 2. Support for this * will require a new field specifying the number to add after * scaling by stride. This is fixed at 1 currently. */ }; -static __checkReturn int +static __checkReturn efx_rc_t hunt_external_port_mapping( __in efx_nic_t *enp, __in uint32_t port, __out uint8_t *external_portp) { - int rc; + efx_rc_t rc; int i; uint32_t port_modes; uint32_t matches; uint32_t stride = 1; /* default 1-1 mapping */ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes)) != 0) { /* No port mode information available - use default mapping */ goto out; } /* * Infer the internal port -> external port mapping from * the possible port modes for this NIC. 
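The loop that follows implements the inference just described: pick the stride advertised for the NIC's supported port modes, scale the zero-based MCDI port number by it, and convert to one-based connector numbering. A standalone sketch of that arithmetic, with a made-up mapping table standing in for __hunt_external_port_mappings and without the leftover-mode ENOTSUP check:

#include <stdint.h>

/* Hypothetical stand-in for one row of __hunt_external_port_mappings. */
struct sketch_port_mapping {
        uint32_t modes_mask;    /* port modes that need this stride */
        uint32_t stride;        /* internal ports per external connector */
};

uint8_t
sketch_external_port(uint32_t port, uint32_t port_modes,
    const struct sketch_port_mapping *table, unsigned int nentries)
{
        uint32_t stride = 1;    /* default 1-1 mapping */
        unsigned int i;

        for (i = 0; i < nentries; i++) {
                if ((table[i].modes_mask & port_modes) != 0)
                        stride = table[i].stride;
        }

        /* e.g. stride 2 (40G modes): internal ports 0,1 -> external port 1 */
        return ((uint8_t)(port / stride) + 1);
}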
*/ for (i = 0; i < EFX_ARRAY_SIZE(__hunt_external_port_mappings); ++i) { if (__hunt_external_port_mappings[i].family != enp->en_family) continue; matches = (__hunt_external_port_mappings[i].modes_mask & port_modes); if (matches != 0) { stride = __hunt_external_port_mappings[i].stride; port_modes &= ~matches; } } if (port_modes != 0) { /* Some advertised modes are not supported */ rc = ENOTSUP; goto fail1; } out: /* * Scale as required by last matched mode and then convert to * one-based numbering */ *external_portp = (uint8_t)(port / stride) + 1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t hunt_board_cfg( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint8_t mac_addr[6]; uint32_t board_type = 0; hunt_link_state_t hls; efx_port_t *epp = &(enp->en_port); uint32_t port; uint32_t pf; uint32_t vf; uint32_t mask; uint32_t flags; uint32_t sysclk; uint32_t base, nvec; - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) goto fail1; /* * NOTE: The MCDI protocol numbers ports from zero. * The common code MCDI interface numbers ports from one. */ emip->emi_port = port + 1; if ((rc = hunt_external_port_mapping(enp, port, &encp->enc_external_port)) != 0) goto fail2; /* * Get PCIe function number from firmware (used for * per-function privilege and dynamic config info). * - PCIe PF: pf = PF number, vf = 0xffff. * - PCIe VF: pf = parent PF, vf = VF number. */ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) goto fail3; encp->enc_pf = pf; encp->enc_vf = vf; /* MAC address for this function */ if (EFX_PCI_FUNCTION_IS_PF(encp)) { rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); } else { rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); } if ((rc == 0) && (mac_addr[0] & 0x02)) { /* * If the static config does not include a global MAC address * pool then the board may return a locally administered MAC * address (this should only happen on incorrectly programmed * boards). */ rc = EINVAL; } if (rc != 0) goto fail4; EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); /* Board configuration */ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); if (rc != 0) { /* Unprivileged functions may not be able to read board cfg */ if (rc == EACCES) board_type = 0; else goto fail5; } encp->enc_board_type = board_type; encp->enc_clk_mult = 1; /* not used for Huntington */ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) goto fail6; /* Obtain the default PHY advertised capabilities */ if ((rc = hunt_phy_get_link(enp, &hls)) != 0) goto fail7; epp->ep_default_adv_cap_mask = hls.hls_adv_cap_mask; epp->ep_adv_cap_mask = hls.hls_adv_cap_mask; /* * Enable firmware workarounds for hardware errata. * Expected responses are: * - 0 (zero): * Success: workaround enabled or disabled as requested. * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP): * Firmware does not support the MC_CMD_WORKAROUND request. * (assume that the workaround is not supported). * - MC_CMD_ERR_ENOENT (reported as ENOENT): * Firmware does not support the requested workaround. * - MC_CMD_ERR_EPERM (reported as EACCES): * Unprivileged function cannot enable/disable workarounds. * * See efx_mcdi_request_errcode() for MCDI error translations. */ /* * If the bug35388 workaround is enabled, then use an indirect access * method to avoid unsafe EVQ writes. 
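The workaround requests that follow all fold the return codes enumerated in the comment above into a simple decision. A sketch of the bug35388 case specifically, where success or EACCES both mean the workaround is in effect; the enum is hypothetical and stands in for the driver's boolean config fields:

#include <errno.h>

typedef enum {
        SKETCH_WORKAROUND_ENABLED,      /* use the workaround */
        SKETCH_WORKAROUND_UNAVAILABLE,  /* firmware cannot provide it */
        SKETCH_WORKAROUND_ERROR         /* genuine failure; propagate rc */
} sketch_workaround_state_t;

/*
 * Classify the rc from an "enable workaround" request the way
 * hunt_board_cfg() does for MC_CMD_WORKAROUND_BUG35388.
 */
sketch_workaround_state_t
sketch_classify_workaround_rc(int rc)
{
        if (rc == 0 || rc == EACCES)
                return (SKETCH_WORKAROUND_ENABLED);
        if (rc == ENOTSUP || rc == ENOENT)
                return (SKETCH_WORKAROUND_UNAVAILABLE);
        return (SKETCH_WORKAROUND_ERROR);
}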
*/ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE, NULL); if ((rc == 0) || (rc == EACCES)) encp->enc_bug35388_workaround = B_TRUE; else if ((rc == ENOTSUP) || (rc == ENOENT)) encp->enc_bug35388_workaround = B_FALSE; else goto fail8; /* * If the bug41750 workaround is enabled, then do not test interrupts, * as the test will fail (seen with Greenport controllers). */ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE, NULL); if (rc == 0) { encp->enc_bug41750_workaround = B_TRUE; } else if (rc == EACCES) { /* Assume a controller with 40G ports needs the workaround. */ if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX) encp->enc_bug41750_workaround = B_TRUE; else encp->enc_bug41750_workaround = B_FALSE; } else if ((rc == ENOTSUP) || (rc == ENOENT)) { encp->enc_bug41750_workaround = B_FALSE; } else { goto fail9; } if (EFX_PCI_FUNCTION_IS_VF(encp)) { /* Interrupt testing does not work for VFs. See bug50084. */ encp->enc_bug41750_workaround = B_TRUE; } /* * If the bug26807 workaround is enabled, then firmware has enabled * support for chained multicast filters. Firmware will reset (FLR) * functions which have filters in the hardware filter table when the * workaround is enabled/disabled. * * We must recheck if the workaround is enabled after inserting the * first hardware filter, in case it has been changed since this check. */ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807, B_TRUE, &flags); if (rc == 0) { encp->enc_bug26807_workaround = B_TRUE; if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) { /* * Other functions had installed filters before the * workaround was enabled, and they have been reset * by firmware. */ EFSYS_PROBE(bug26807_workaround_flr_done); /* FIXME: bump MC warm boot count ? */ } } else if (rc == EACCES) { /* * Unprivileged functions cannot enable the workaround in older * firmware. */ encp->enc_bug26807_workaround = B_FALSE; } else if ((rc == ENOTSUP) || (rc == ENOENT)) { encp->enc_bug26807_workaround = B_FALSE; } else { goto fail10; } /* Get sysclk frequency (in MHz). */ if ((rc = efx_mcdi_get_clock(enp, &sysclk)) != 0) goto fail11; /* * The timer quantum is 1536 sysclk cycles, documented for the * EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units. */ encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */ if (encp->enc_bug35388_workaround) { encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000; } else { encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; } /* Check capabilities of running datapath firmware */ if ((rc = hunt_get_datapath_caps(enp)) != 0) goto fail12; /* Alignment for receive packet DMA buffers */ encp->enc_rx_buf_align_start = 1; encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */ /* Alignment for WPTR updates */ encp->enc_rx_push_align = HUNTINGTON_RX_WPTR_ALIGN; /* * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available * resources (allocated to this PCIe function), which is zero until * after we have allocated VIs. */ encp->enc_evq_limit = 1024; encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; encp->enc_buftbl_limit = 0xFFFFFFFF; encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS; encp->enc_piobuf_size = HUNT_PIOBUF_SIZE; /* * Get the current privilege mask. Note that this may be modified * dynamically, so this value is informational only. 
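The event-queue timer figures computed above reduce to two expressions: the quantum is 1536 system-clock cycles converted from MHz to nanoseconds, and the maximum timeout is that quantum shifted left by the timer field width, expressed in microseconds. A worked standalone version; the clock frequency and the 14-bit width here are illustrative only, the driver takes them from MC_CMD_GET_CLOCK and the ERF_DD/FRF_CZ field widths:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_TIMER_VAL_WIDTH  14      /* illustrative field width */

int
main(void)
{
        uint32_t sysclk_mhz = 800;      /* example frequency only */
        uint32_t quantum_ns, max_us;

        /* 1536 clock cycles, converted from a MHz clock to nanoseconds */
        quantum_ns = 1536000UL / sysclk_mhz;

        /* Largest programmable timeout: quantum << field width, in us */
        max_us = (quantum_ns << SKETCH_TIMER_VAL_WIDTH) / 1000;

        printf("quantum %u ns, max timeout %u us\n",
            (unsigned)quantum_ns, (unsigned)max_us);
        return (0);
}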
DO NOT use * the privilege mask to check for sufficient privileges, as that * can result in time-of-check/time-of-use bugs. */ if ((rc = efx_mcdi_privilege_mask(enp, pf, vf, &mask)) != 0) { if (rc != ENOTSUP) goto fail13; /* Fallback for old firmware without privilege mask support */ if (EFX_PCI_FUNCTION_IS_PF(encp)) { /* Assume PF has admin privilege */ mask = HUNT_LEGACY_PF_PRIVILEGE_MASK; } else { /* VF is always unprivileged by default */ mask = HUNT_LEGACY_VF_PRIVILEGE_MASK; } } encp->enc_privilege_mask = mask; /* Get interrupt vector limits */ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { if (EFX_PCI_FUNCTION_IS_PF(encp)) goto fail14; /* Ignore error (cannot query vector limits from a VF). */ base = 0; nvec = 1024; } encp->enc_intr_vec_base = base; encp->enc_intr_limit = nvec; /* * Maximum number of bytes into the frame the TCP header can start for * firmware assisted TSO to work. */ encp->enc_tx_tso_tcp_header_offset_limit = 208; return (0); fail14: EFSYS_PROBE(fail14); fail13: EFSYS_PROBE(fail13); fail12: EFSYS_PROBE(fail12); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nic_probe( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); /* Read and clear any assertion state */ if ((rc = efx_mcdi_read_assertion(enp)) != 0) goto fail1; /* Exit the assertion handler */ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) if (rc != EACCES) goto fail2; if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) goto fail3; if ((rc = hunt_board_cfg(enp)) != 0) if (rc != EACCES) goto fail4; /* * Set default driver config limits (based on board config). * * FIXME: For now allocate a fixed number of VIs which is likely to be * sufficient and small enough to allow multiple functions on the same * port. 
*/ edcp->edc_min_vi_count = edcp->edc_max_vi_count = MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit)); /* The client driver must configure and enable PIO buffer support */ edcp->edc_max_piobuf_count = 0; edcp->edc_pio_alloc_size = 0; #if EFSYS_OPT_MAC_STATS /* Wipe the MAC statistics */ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0) goto fail5; #endif #if EFSYS_OPT_LOOPBACK if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0) goto fail6; #endif #if EFSYS_OPT_MON_STATS if ((rc = mcdi_mon_cfg_build(enp)) != 0) { /* Unprivileged functions do not have access to sensors */ if (rc != EACCES) goto fail7; } #endif encp->enc_features = enp->en_features; return (0); #if EFSYS_OPT_MON_STATS fail7: EFSYS_PROBE(fail7); #endif #if EFSYS_OPT_LOOPBACK fail6: EFSYS_PROBE(fail6); #endif #if EFSYS_OPT_MAC_STATS fail5: EFSYS_PROBE(fail5); #endif fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); uint32_t min_evq_count, max_evq_count; uint32_t min_rxq_count, max_rxq_count; uint32_t min_txq_count, max_txq_count; - int rc; + efx_rc_t rc; if (edlp == NULL) { rc = EINVAL; goto fail1; } /* Get minimum required and maximum usable VI limits */ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit); min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit); min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit); edcp->edc_min_vi_count = MAX(min_evq_count, MAX(min_rxq_count, min_txq_count)); max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit); max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit); max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit); edcp->edc_max_vi_count = MAX(max_evq_count, MAX(max_rxq_count, max_txq_count)); /* * Check limits for sub-allocated piobuf blocks. * PIO is optional, so don't fail if the limits are incorrect. */ if ((encp->enc_piobuf_size == 0) || (encp->enc_piobuf_limit == 0) || (edlp->edl_min_pio_alloc_size == 0) || (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) { /* Disable PIO */ edcp->edc_max_piobuf_count = 0; edcp->edc_pio_alloc_size = 0; } else { uint32_t blk_size, blk_count, blks_per_piobuf; blk_size = MAX(edlp->edl_min_pio_alloc_size, HUNT_MIN_PIO_ALLOC_SIZE); blks_per_piobuf = encp->enc_piobuf_size / blk_size; EFSYS_ASSERT3U(blks_per_piobuf, <=, 32); blk_count = (encp->enc_piobuf_limit * blks_per_piobuf); /* A zero max pio alloc count means unlimited */ if ((edlp->edl_max_pio_alloc_count > 0) && (edlp->edl_max_pio_alloc_count < blk_count)) { blk_count = edlp->edl_max_pio_alloc_count; } edcp->edc_pio_alloc_size = blk_size; edcp->edc_max_piobuf_count = (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nic_reset( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN, MC_CMD_ENTITY_RESET_OUT_LEN)]; - int rc; + efx_rc_t rc; /* hunt_nic_reset() is called to recover from BADASSERT failures. 
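The PIO limit handling just above amounts to: clamp the client's block size to the hardware minimum, work out how many blocks fit in one piobuf, and round the requested block count up to whole piobufs. A standalone sketch of that sizing, with invented constants standing in for HUNT_PIOBUF_SIZE and HUNT_MIN_PIO_ALLOC_SIZE (the driver additionally caps the block count against its piobuf limit):

#include <stdint.h>

#define SKETCH_PIOBUF_SIZE      2048u   /* invented piobuf size */
#define SKETCH_MIN_PIO_ALLOC    64u     /* invented minimum block size */

#define SKETCH_MAX(a, b)        ((a) > (b) ? (a) : (b))

/*
 * Turn the client's minimum block size and requested block count into the
 * block size actually used and the number of piobufs to allocate, as
 * hunt_nic_set_drv_limits() does.
 */
void
sketch_pio_limits(uint32_t min_alloc_size, uint32_t blk_count,
    uint32_t *blk_sizep, uint32_t *piobuf_countp)
{
        uint32_t blk_size, blks_per_piobuf;

        blk_size = SKETCH_MAX(min_alloc_size, SKETCH_MIN_PIO_ALLOC);
        if (blk_size > SKETCH_PIOBUF_SIZE) {
                /* Unusable limits: disable PIO, as the driver does */
                *blk_sizep = 0;
                *piobuf_countp = 0;
                return;
        }
        blks_per_piobuf = SKETCH_PIOBUF_SIZE / blk_size;

        /* Round up so every requested block lands in some piobuf */
        *blk_sizep = blk_size;
        *piobuf_countp =
            (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
}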
*/ if ((rc = efx_mcdi_read_assertion(enp)) != 0) goto fail1; if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) goto fail2; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_ENTITY_RESET; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN; MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG, ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } /* Clear RX/TX DMA queue errors */ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nic_init( __in efx_nic_t *enp) { efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); uint32_t min_vi_count, max_vi_count; uint32_t vi_count, vi_base; uint32_t i; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); /* Enable reporting of some events (e.g. link change) */ if ((rc = efx_mcdi_log_ctrl(enp)) != 0) goto fail1; /* Allocate (optional) on-chip PIO buffers */ hunt_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count); /* * For best performance, PIO writes should use a write-combined * (WC) memory mapping. Using a separate WC mapping for the PIO * aperture of each VI would be a burden to drivers (and not * possible if the host page size is >4Kbyte). * * To avoid this we use a single uncached (UC) mapping for VI * register access, and a single WC mapping for extra VIs used * for PIO writes. * * Each piobuf must be linked to a VI in the WC mapping, and to * each VI that is using a sub-allocated block from the piobuf. */ min_vi_count = edcp->edc_min_vi_count; max_vi_count = edcp->edc_max_vi_count + enp->en_u.hunt.enu_piobuf_count; /* Ensure that the previously attached driver's VIs are freed */ if ((rc = efx_mcdi_free_vis(enp)) != 0) goto fail2; /* * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this * fails then retrying the request for fewer VI resources may succeed. 
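The reservation that follows asks the MC for anything between a minimum and a maximum VI count and rejects a grant that falls short of the minimum. A minimal sketch of that accept/reject step; sketch_alloc_vis() is a stub standing in for efx_mcdi_alloc_vis(), not a real API:

#include <errno.h>
#include <stdint.h>

/* Stub allocator: pretend the MC always grants the maximum request. */
int
sketch_alloc_vis(uint32_t min_count, uint32_t max_count,
    uint32_t *basep, uint32_t *countp)
{
        (void)min_count;
        *basep = 0;
        *countp = max_count;
        return (0);
}

/*
 * Reserve VIs the way hunt_nic_init() does: request [min, max], then
 * insist the grant covers at least the minimum before accepting it.
 */
int
sketch_reserve_vis(uint32_t min_count, uint32_t max_count,
    uint32_t *basep, uint32_t *countp)
{
        int rc;

        if ((rc = sketch_alloc_vis(min_count, max_count, basep, countp)) != 0)
                return (rc);

        if (*countp < min_count)
                return (ENOMEM); /* caller may retry with a smaller request */

        return (0);
}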
*/ vi_count = 0; if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count, &vi_base, &vi_count)) != 0) goto fail3; EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count); if (vi_count < min_vi_count) { rc = ENOMEM; goto fail4; } enp->en_u.hunt.enu_vi_base = vi_base; enp->en_u.hunt.enu_vi_count = vi_count; if (vi_count < min_vi_count + enp->en_u.hunt.enu_piobuf_count) { /* Not enough extra VIs to map piobufs */ hunt_nic_free_piobufs(enp); } enp->en_u.hunt.enu_pio_write_vi_base = vi_count - enp->en_u.hunt.enu_piobuf_count; /* Save UC memory mapping details */ enp->en_u.hunt.enu_uc_mem_map_offset = 0; if (enp->en_u.hunt.enu_piobuf_count > 0) { enp->en_u.hunt.enu_uc_mem_map_size = (ER_DZ_TX_PIOBUF_STEP * enp->en_u.hunt.enu_pio_write_vi_base); } else { enp->en_u.hunt.enu_uc_mem_map_size = (ER_DZ_TX_PIOBUF_STEP * enp->en_u.hunt.enu_vi_count); } /* Save WC memory mapping details */ enp->en_u.hunt.enu_wc_mem_map_offset = enp->en_u.hunt.enu_uc_mem_map_offset + enp->en_u.hunt.enu_uc_mem_map_size; enp->en_u.hunt.enu_wc_mem_map_size = (ER_DZ_TX_PIOBUF_STEP * enp->en_u.hunt.enu_piobuf_count); /* Link piobufs to extra VIs in WC mapping */ if (enp->en_u.hunt.enu_piobuf_count > 0) { for (i = 0; i < enp->en_u.hunt.enu_piobuf_count; i++) { rc = efx_mcdi_link_piobuf(enp, enp->en_u.hunt.enu_pio_write_vi_base + i, enp->en_u.hunt.enu_piobuf_handle[i]); if (rc != 0) break; } } /* Allocate a vAdapter attached to our upstream vPort/pPort */ if ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) goto fail5; enp->en_vport_id = EVB_PORT_ID_ASSIGNED; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); hunt_nic_free_piobufs(enp); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *vi_countp) { EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); /* * Report VIs that the client driver can use. * Do not include VIs used for PIO buffer writes. */ *vi_countp = enp->en_u.hunt.enu_pio_write_vi_base; return (0); } - __checkReturn int + __checkReturn efx_rc_t hunt_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep) { - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON); /* * TODO: Specify host memory mapping alignment and granularity * in efx_drv_limits_t so that they can be taken into account * when allocating extra VIs for PIO writes. 
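The mapping bookkeeping above can be summarised as: the uncached region covers the VIs handed to the client driver, the write-combined region covers only the extra VIs linked to piobufs, and the WC region begins where the UC region ends. A standalone sketch of that arithmetic, with an invented per-VI stride in place of ER_DZ_TX_PIOBUF_STEP:

#include <stddef.h>
#include <stdint.h>

#define SKETCH_VI_STEP  8192u   /* invented per-VI BAR stride */

struct sketch_bar_layout {
        size_t  uc_offset;      /* VI registers, uncached mapping */
        size_t  uc_size;
        size_t  wc_offset;      /* PIO write VIs, write-combined mapping */
        size_t  wc_size;
};

/*
 * Split the BAR the way hunt_nic_init() records it: the last piobuf_count
 * VIs (if any) are reserved for PIO writes and only they go in the WC
 * region; everything before them is mapped uncached.
 */
void
sketch_bar_split(uint32_t vi_count, uint32_t piobuf_count,
    struct sketch_bar_layout *lp)
{
        uint32_t pio_write_vi_base = vi_count - piobuf_count;

        lp->uc_offset = 0;
        lp->uc_size = (size_t)SKETCH_VI_STEP *
            (piobuf_count > 0 ? pio_write_vi_base : vi_count);
        lp->wc_offset = lp->uc_offset + lp->uc_size;
        lp->wc_size = (size_t)SKETCH_VI_STEP * piobuf_count;
}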
*/ switch (region) { case EFX_REGION_VI: /* UC mapped memory BAR region for VI registers */ *offsetp = enp->en_u.hunt.enu_uc_mem_map_offset; *sizep = enp->en_u.hunt.enu_uc_mem_map_size; break; case EFX_REGION_PIO_WRITE_VI: /* WC mapped memory BAR region for piobuf writes */ *offsetp = enp->en_u.hunt.enu_wc_mem_map_offset; *sizep = enp->en_u.hunt.enu_wc_mem_map_size; break; default: rc = EINVAL; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_nic_fini( __in efx_nic_t *enp) { uint32_t i; - int rc; + efx_rc_t rc; (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id); enp->en_vport_id = 0; /* Unlink piobufs from extra VIs in WC mapping */ if (enp->en_u.hunt.enu_piobuf_count > 0) { for (i = 0; i < enp->en_u.hunt.enu_piobuf_count; i++) { rc = efx_mcdi_unlink_piobuf(enp, enp->en_u.hunt.enu_pio_write_vi_base + i); if (rc != 0) break; } } hunt_nic_free_piobufs(enp); (void) efx_mcdi_free_vis(enp); enp->en_u.hunt.enu_vi_count = 0; } void hunt_nic_unprobe( __in efx_nic_t *enp) { #if EFSYS_OPT_MON_STATS mcdi_mon_cfg_free(enp); #endif /* EFSYS_OPT_MON_STATS */ (void) efx_mcdi_drv_attach(enp, B_FALSE); } #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t hunt_nic_register_test( __in efx_nic_t *enp) { - int rc; + efx_rc_t rc; /* FIXME */ _NOTE(ARGUNUSED(enp)) if (B_FALSE) { rc = ENOTSUP; goto fail1; } /* FIXME */ return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_nvram.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_nvram.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_nvram.c (revision 293927) @@ -1,1812 +1,1812 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM #include "ef10_tlv_layout.h" /* Cursor for TLV partition format */ typedef struct tlv_cursor_s { uint32_t *block; /* Base of data block */ uint32_t *current; /* Cursor position */ uint32_t *end; /* End tag position */ uint32_t *limit; /* Last dword of data block */ } tlv_cursor_t; -static __checkReturn int +static __checkReturn efx_rc_t tlv_validate_state( __in tlv_cursor_t *cursor); /* * Operations on TLV formatted partition data. */ static uint32_t tlv_tag( __in tlv_cursor_t *cursor) { uint32_t dword, tag; dword = cursor->current[0]; tag = __LE_TO_CPU_32(dword); return (tag); } static size_t tlv_length( __in tlv_cursor_t *cursor) { uint32_t dword, length; if (tlv_tag(cursor) == TLV_TAG_END) return (0); dword = cursor->current[1]; length = __LE_TO_CPU_32(dword); return ((size_t)length); } static uint8_t * tlv_value( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)(&cursor->current[2])); } static uint8_t * tlv_item( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)cursor->current); } /* * TLV item DWORD length is tag + length + value (rounded up to DWORD) * equivalent to tlv_n_words_for_len in mc-comms tlv.c */ #define TLV_DWORD_COUNT(length) \ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t))) static uint32_t * tlv_next_item_ptr( __in tlv_cursor_t *cursor) { uint32_t length; length = tlv_length(cursor); return (cursor->current + TLV_DWORD_COUNT(length)); } -static int +static efx_rc_t tlv_advance( __in tlv_cursor_t *cursor) { - int rc; + efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (cursor->current == cursor->end) { /* No more tags after END tag */ cursor->current = NULL; rc = ENOENT; goto fail2; } /* Advance to next item and validate */ cursor->current = tlv_next_item_ptr(cursor); if ((rc = tlv_validate_state(cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static int +static efx_rc_t tlv_rewind( __in tlv_cursor_t *cursor) { - int rc; + efx_rc_t rc; cursor->current = cursor->block; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static int +static efx_rc_t tlv_find( __in tlv_cursor_t *cursor, __in uint32_t tag) { - int rc; + efx_rc_t rc; rc = tlv_rewind(cursor); while (rc == 0) { if (tlv_tag(cursor) == tag) break; rc = tlv_advance(cursor); } return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t tlv_validate_state( __in tlv_cursor_t *cursor) { - int rc; + efx_rc_t rc; /* Check cursor position */ if (cursor->current < cursor->block) { rc = EINVAL; goto fail1; } if (cursor->current > cursor->limit) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != TLV_TAG_END) { /* Check current item has space for tag and length */ if (cursor->current > (cursor->limit - 2)) { cursor->current = NULL; rc = EFAULT; goto fail3; } /* Check we have value data for current item and another tag */ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) { cursor->current = NULL; rc = EFAULT; goto fail4; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - 
EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static int +static efx_rc_t tlv_init_cursor( __in tlv_cursor_t *cursor, __in uint32_t *block, __in uint32_t *limit) { cursor->block = block; cursor->limit = limit; cursor->current = cursor->block; cursor->end = NULL; return (tlv_validate_state(cursor)); } -static int +static efx_rc_t tlv_init_cursor_from_size( __in tlv_cursor_t *cursor, __in uint8_t *block, __in size_t size) { uint32_t *limit; limit = (uint32_t *)(block + size - sizeof (uint32_t)); return (tlv_init_cursor(cursor, (uint32_t *)block, limit)); } -static int +static efx_rc_t tlv_require_end( __in tlv_cursor_t *cursor) { uint32_t *pos; - int rc; + efx_rc_t rc; if (cursor->end == NULL) { pos = cursor->current; if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0) goto fail1; cursor->end = cursor->current; cursor->current = pos; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static size_t tlv_block_length_used( __in tlv_cursor_t *cursor) { - int rc; + efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; /* Return space used (including the END tag) */ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } static __checkReturn uint32_t * tlv_write( __in tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t len = size; uint32_t *ptr; ptr = cursor->current; *ptr++ = __CPU_TO_LE_32(tag); *ptr++ = __CPU_TO_LE_32(len); if (len > 0) { ptr[(len - 1) / sizeof (uint32_t)] = 0; memcpy(ptr, data, len); ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr); } return (ptr); } -static __checkReturn int +static __checkReturn efx_rc_t tlv_insert( __in tlv_cursor_t *cursor, __in uint32_t tag, __in uint8_t *data, __in size_t size) { unsigned int delta; - int rc; + efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; if (tag == TLV_TAG_END) { rc = EINVAL; goto fail3; } delta = TLV_DWORD_COUNT(size); if (cursor->end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail4; } /* Move data up: new space at cursor->current */ memmove(cursor->current + delta, cursor->current, (cursor->end + 1 - cursor->current) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; /* Write new TLV item */ tlv_write(cursor, tag, data, size); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t tlv_modify( __in tlv_cursor_t *cursor, __in uint32_t tag, __in uint8_t *data, __in size_t size) { uint32_t *pos; unsigned int old_ndwords; unsigned int new_ndwords; unsigned int delta; - int rc; + efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != tag) { rc = EINVAL; goto fail3; } old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor)); new_ndwords = TLV_DWORD_COUNT(size); if ((rc = tlv_require_end(cursor)) != 0) goto fail4; if (new_ndwords > old_ndwords) { /* Expand space used for TLV item */ delta = new_ndwords - old_ndwords; pos = cursor->current + old_ndwords; if (cursor->end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail5; } /* Move 
up: new space at (cursor->current + old_ndwords) */ memmove(pos + delta, pos, (cursor->end + 1 - pos) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; } else if (new_ndwords < old_ndwords) { /* Shrink space used for TLV item */ delta = old_ndwords - new_ndwords; pos = cursor->current + new_ndwords; /* Move down: remove words at (cursor->current + new_ndwords) */ memmove(pos, pos + delta, (cursor->end + 1 - pos) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(cursor->end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; } /* Write new data */ tlv_write(cursor, tag, data, size); return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Validate TLV formatted partition contents (before writing to flash) */ - __checkReturn int + __checkReturn efx_rc_t efx_nvram_tlv_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; - int rc; + efx_rc_t rc; EFX_STATIC_ASSERT(sizeof (*header) <= HUNTINGTON_NVRAM_CHUNK); if ((partn_data == NULL) || (partn_size == 0)) { rc = EINVAL; goto fail1; } /* The partition header must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, partn_data, partn_size)) != 0) { rc = EFAULT; goto fail2; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail3; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV partition length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > partn_size) { rc = EFBIG; goto fail4; } /* Check partition ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail5; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail6; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail7; } /* Check generation counts are consistent */ if (trailer->generation != header->generation) { rc = EINVAL; goto fail8; } /* Verify partition checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(partn_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail9; } return (0); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read and validate a segment from a partition. A segment is a complete * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may * be multiple segments in a partition, so seg_offset allows segments * beyond the first to be read. 
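The validation above depends on the partition checksum convention: the 32-bit words of the whole TLV area, including the header, trailer and END tag, sum to zero modulo 2^32. The driver sums raw uint32_t loads; the sketch below reassembles each word explicitly as little-endian, which is what those loads see on the little-endian hosts this code targets, so it stays alignment-safe and self-contained:

#include <stddef.h>
#include <stdint.h>

/*
 * Verify the zero-sum checksum used by efx_nvram_tlv_validate(): add every
 * 32-bit word of the partition data, letting the sum wrap, and require the
 * total to be zero.  Returns 1 if the checksum is good, 0 otherwise.
 */
int
sketch_tlv_cksum_ok(const uint8_t *data, size_t total_length)
{
        uint32_t cksum = 0;
        size_t pos;

        if (total_length % sizeof (uint32_t) != 0)
                return (0);     /* not a whole number of dwords */

        for (pos = 0; pos < total_length; pos += sizeof (uint32_t)) {
                cksum += (uint32_t)data[pos] |
                    ((uint32_t)data[pos + 1] << 8) |
                    ((uint32_t)data[pos + 2] << 16) |
                    ((uint32_t)data[pos + 3] << 24);
        }
        return (cksum == 0);
}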
*/ -static __checkReturn int +static __checkReturn efx_rc_t hunt_nvram_read_tlv_segment( __in efx_nic_t *enp, __in uint32_t partn, __in size_t seg_offset, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; - int rc; + efx_rc_t rc; EFX_STATIC_ASSERT(sizeof (*header) <= HUNTINGTON_NVRAM_CHUNK); if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Read initial chunk of the segment, starting at offset */ if ((rc = hunt_nvram_partn_read(enp, partn, seg_offset, seg_data, HUNTINGTON_NVRAM_CHUNK)) != 0) { goto fail2; } /* A PARTITION_HEADER tag must be the first item at the given offset */ if ((rc = tlv_init_cursor_from_size(&cursor, seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail3; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail4; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV segment length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > max_seg_size) { rc = EFBIG; goto fail5; } /* Read the remaining segment content */ if (total_length > HUNTINGTON_NVRAM_CHUNK) { if ((rc = hunt_nvram_partn_read(enp, partn, seg_offset + HUNTINGTON_NVRAM_CHUNK, seg_data + HUNTINGTON_NVRAM_CHUNK, total_length - HUNTINGTON_NVRAM_CHUNK)) != 0) goto fail6; } /* Check segment ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail7; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail8; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail9; } /* Check data read from segment is consistent */ if (trailer->generation != header->generation) { /* * The partition data may have been modified between successive * MCDI NVRAM_READ requests by the MC or another PCI function. * * The caller must retry to obtain consistent partition data. */ rc = EAGAIN; goto fail10; } /* Verify segment checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail11; } return (0); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read a single TLV item from a host memory * buffer containing a TLV formatted segment. 
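/*
 * Illustrative sketch of the two-phase read in hunt_nvram_read_tlv_segment()
 * above: fetch one chunk first, take the segment's total_length from the
 * partition header, then read only the remainder.  read_fn(), SKETCH_CHUNK
 * and the header offset used here are hypothetical stand-ins for
 * hunt_nvram_partn_read(), HUNTINGTON_NVRAM_CHUNK and header->total_length.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define	SKETCH_CHUNK		128u
#define	SKETCH_LENGTH_OFFSET	8u	/* placeholder field offset */

typedef int (*read_fn_t)(size_t offset, uint8_t *buf, size_t len);

static int
read_segment_sketch(read_fn_t read_fn, size_t seg_offset,
    uint8_t *buf, size_t bufsize, size_t *lengthp)
{
	uint32_t total_length;
	int rc;

	if (bufsize < SKETCH_CHUNK)
		return (-1);
	if ((rc = read_fn(seg_offset, buf, SKETCH_CHUNK)) != 0)
		return (rc);

	memcpy(&total_length, buf + SKETCH_LENGTH_OFFSET,
	    sizeof (total_length));
	if (total_length > bufsize)
		return (-1);		/* segment larger than the buffer */

	if (total_length > SKETCH_CHUNK &&
	    (rc = read_fn(seg_offset + SKETCH_CHUNK, buf + SKETCH_CHUNK,
	    total_length - SKETCH_CHUNK)) != 0)
		return (rc);

	*lengthp = total_length;
	return (0);
}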
*/ - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep) { tlv_cursor_t cursor; caddr_t data; size_t length; caddr_t value; - int rc; + efx_rc_t rc; if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Find requested TLV tag in segment data */ if ((rc = tlv_init_cursor_from_size(&cursor, seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail2; } if ((rc = tlv_find(&cursor, tag)) != 0) { rc = ENOENT; goto fail3; } value = tlv_value(&cursor); length = tlv_length(&cursor); if (length == 0) data = NULL; else { /* Copy out data from TLV item */ EFSYS_KMEM_ALLOC(enp->en_esip, length, data); if (data == NULL) { rc = ENOMEM; goto fail4; } memcpy(data, value, length); } *datap = data; *sizep = length; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Read a single TLV item from the first segment in a TLV formatted partition */ - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap, __out size_t *seg_sizep) { caddr_t seg_data = NULL; size_t partn_size = 0; size_t length; caddr_t data; int retry; - int rc; + efx_rc_t rc; /* Allocate sufficient memory for the entire partition */ if ((rc = hunt_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; if (partn_size == 0) { rc = ENOENT; goto fail2; } EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data); if (seg_data == NULL) { rc = ENOMEM; goto fail3; } /* * Read the first segment in a TLV partition. Retry until consistent * segment contents are returned. Inconsistent data may be read if: * a) the segment contents are invalid * b) the MC has rebooted while we were reading the partition * c) the partition has been modified while we were reading it * Limit retry attempts to ensure forward progress. */ retry = 10; do { rc = hunt_nvram_read_tlv_segment(enp, partn, 0, seg_data, partn_size); } while ((rc == EAGAIN) && (--retry > 0)); if (rc != 0) { /* Failed to obtain consistent segment data */ goto fail4; } if ((rc = hunt_nvram_buf_read_tlv(enp, seg_data, partn_size, tag, &data, &length)) != 0) goto fail5; EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); *seg_datap = data; *seg_sizep = length; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Compute the size of a segment. 
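/*
 * Illustrative sketch of the bounded-retry loop in
 * hunt_nvram_partn_read_tlv() above: a segment read that raced with a
 * concurrent writer reports EAGAIN and is retried a fixed number of times so
 * the caller always makes forward progress.  try_read() is a hypothetical
 * stand-in for hunt_nvram_read_tlv_segment().
 */
#include <errno.h>

static int
read_consistent_sketch(int (*try_read)(void *arg), void *arg)
{
	int retry = 10;		/* same limit as the code above */
	int rc;

	do {
		rc = try_read(arg);
	} while (rc == EAGAIN && --retry > 0);

	return (rc);		/* 0, EAGAIN (limit hit) or another error */
}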
*/ - static __checkReturn int + static __checkReturn efx_rc_t hunt_nvram_buf_segment_size( __in caddr_t seg_data, __in size_t max_seg_size, __out size_t *seg_sizep) { - int rc; + efx_rc_t rc; tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; uint32_t cksum; int pos; uint32_t *end_tag_position; uint32_t segment_length; /* A PARTITION_HEADER tag must be the first item at the given offset */ if ((rc = tlv_init_cursor_from_size(&cursor, seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV segment length (includes the END tag) */ *seg_sizep = __LE_TO_CPU_32(header->total_length); if (*seg_sizep > max_seg_size) { rc = EFBIG; goto fail3; } /* Check segment ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail4; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail5; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail6; } end_tag_position = cursor.current; /* Verify segment checksum */ cksum = 0; for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail7; } /* * Calculate total length from HEADER to END tags and compare to * max_seg_size and the total_length field in the HEADER tag. */ segment_length = tlv_block_length_used(&cursor); if (segment_length > max_seg_size) { rc = EINVAL; goto fail8; } if (segment_length != *seg_sizep) { rc = EINVAL; goto fail9; } /* Skip over the first HEADER tag. */ rc = tlv_rewind(&cursor); rc = tlv_advance(&cursor); while (rc == 0) { if (tlv_tag(&cursor) == TLV_TAG_END) { /* Check that the END tag is the one found earlier. */ if (cursor.current != end_tag_position) goto fail10; break; } /* Check for duplicate HEADER tags before the END tag. */ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail11; } rc = tlv_advance(&cursor); } if (rc != 0) goto fail12; return (0); fail12: EFSYS_PROBE(fail12); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in a host memory buffer containing a TLV * formatted segment. Historically partitions consisted of only one segment. 
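/*
 * Illustrative sketch of the checksum checks in efx_nvram_tlv_validate() and
 * hunt_nvram_buf_segment_size() above: the sum of every 32-bit word from the
 * PARTITION_HEADER through the END tag must be zero, because the trailer
 * stores the negated sum of the other words.  The buffer is assumed to be
 * 32-bit aligned.
 */
#include <stdint.h>
#include <stddef.h>

static int
tlv_cksum_ok_sketch(const uint32_t *words, size_t total_length)
{
	uint32_t cksum = 0;
	size_t i;

	for (i = 0; i < total_length / sizeof (uint32_t); i++)
		cksum += words[i];

	return (cksum == 0);	/* nonzero => corrupt or inconsistent image */
}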
*/ - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_buf_write_tlv( __inout_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; uint32_t generation; uint32_t cksum; int pos; - int rc; + efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Update the TLV chain to contain the new data */ if ((rc = tlv_find(&cursor, tag)) == 0) { /* Modify existing TLV item */ if ((rc = tlv_modify(&cursor, tag, tag_data, tag_size)) != 0) goto fail3; } else { /* Insert a new TLV item before the PARTITION_TRAILER */ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER); if (rc != 0) { rc = EINVAL; goto fail4; } if ((rc = tlv_insert(&cursor, tag, tag_data, tag_size)) != 0) { rc = EINVAL; goto fail5; } } /* Find the trailer tag */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail6; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); /* Update PARTITION_HEADER and PARTITION_TRAILER fields */ *total_lengthp = tlv_block_length_used(&cursor); if (*total_lengthp > max_seg_size) { rc = ENOSPC; goto fail7; } generation = __LE_TO_CPU_32(header->generation) + 1; header->total_length = __CPU_TO_LE_32(*total_lengthp); header->generation = __CPU_TO_LE_32(generation); trailer->generation = __CPU_TO_LE_32(generation); /* Recompute PARTITION_TRAILER checksum */ trailer->checksum = 0; cksum = 0; for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } trailer->checksum = ~cksum + 1; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in the first segment of a TLV formatted * dynamic config partition. The first segment is the current active * configuration. */ - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size) { return hunt_nvram_partn_write_segment_tlv(enp, partn, tag, data, size, B_FALSE); } /* * Read a segment from nvram at the given offset into a buffer (segment_data) * and optionally write a new tag to it. */ - static __checkReturn int + static __checkReturn efx_rc_t hunt_nvram_segment_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __inout caddr_t *seg_datap, __inout size_t *partn_offsetp, __inout size_t *src_remain_lenp, __inout size_t *dest_remain_lenp, __in boolean_t write) { - int rc; + efx_rc_t rc; int status; size_t original_segment_size; size_t modified_segment_size; /* * Read the segment from NVRAM into the segment_data buffer and validate * it, returning if it does not validate. This is not a failure unless * this is the first segment in a partition. In this case the caller * must propogate the error. 
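/*
 * Illustrative sketch of the checksum update in hunt_nvram_buf_write_tlv()
 * above: with the trailer's checksum field zeroed, the remaining words are
 * summed and the field is set to ~sum + 1 (two's-complement negation), so
 * re-summing the whole segment yields zero and validation passes again.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t
tlv_trailer_cksum_sketch(const uint32_t *words, size_t ndwords)
{
	uint32_t cksum = 0;
	size_t i;

	/* Sum every word, including the already-zeroed checksum field. */
	for (i = 0; i < ndwords; i++)
		cksum += words[i];

	return (~cksum + 1);	/* equals -cksum modulo 2^32 */
}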
*/ status = hunt_nvram_read_tlv_segment(enp, partn, *partn_offsetp, *seg_datap, *src_remain_lenp); if (status != 0) return (EINVAL); status = hunt_nvram_buf_segment_size(*seg_datap, *src_remain_lenp, &original_segment_size); if (status != 0) return (EINVAL); if (write) { /* Update the contents of the segment in the buffer */ if ((rc = hunt_nvram_buf_write_tlv(*seg_datap, *dest_remain_lenp, tag, data, size, &modified_segment_size)) != 0) goto fail1; *dest_remain_lenp -= modified_segment_size; *seg_datap += modified_segment_size; } else { /* * We won't modify this segment, but still need to update the * remaining lengths and pointers. */ *dest_remain_lenp -= original_segment_size; *seg_datap += original_segment_size; } *partn_offsetp += original_segment_size; *src_remain_lenp -= original_segment_size; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in either the first segment or in all * segments in a TLV formatted dynamic config partition. Dynamic config * partitions on boards that support RFID are divided into a number of segments, * each formatted like a partition, with header, trailer and end tags. The first * segment is the current active configuration. * * The segments are initialised by manftest and each contain a different * configuration e.g. firmware variant. The firmware can be instructed * via RFID to copy a segment to replace the first segment, hence changing the * active configuration. This allows ops to change the configuration of a board * prior to shipment using RFID. * * Changes to the dynamic config may need to be written to all segments (e.g. * firmware versions) or just the first segment (changes to the active * configuration). See SF-111324-SW "The use of RFID in Solarflare Products". * If only the first segment is written the code still needs to be aware of the * possible presence of subsequent segments as writing to a segment may cause * its size to increase, which would overwrite the subsequent segments and * invalidate them. */ - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_write_segment_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t all_segments) { size_t partn_size = 0; caddr_t partn_data; size_t total_length = 0; - int rc; + efx_rc_t rc; size_t current_offset = 0; size_t remaining_original_length; size_t remaining_modified_length; caddr_t segment_data; EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG); /* Allocate sufficient memory for the entire partition */ if ((rc = hunt_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data); if (partn_data == NULL) { rc = ENOMEM; goto fail2; } remaining_original_length = partn_size; remaining_modified_length = partn_size; segment_data = partn_data; /* Lock the partition */ if ((rc = hunt_nvram_partn_lock(enp, partn)) != 0) goto fail3; /* Iterate over each (potential) segment to update it. */ do { boolean_t write = all_segments || current_offset == 0; rc = hunt_nvram_segment_write_tlv(enp, partn, tag, data, size, &segment_data, ¤t_offset, &remaining_original_length, &remaining_modified_length, write); if (rc != 0) { if (current_offset == 0) { /* * If no data has been read then the first * segment is invalid, which is an error. */ goto fail4; } break; } } while (current_offset < partn_size); total_length = segment_data - partn_data; /* * We've run out of space. 
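/*
 * Illustrative sketch of the segment loop in
 * hunt_nvram_partn_write_segment_tlv() above: segments are variable length,
 * so each iteration processes one segment, advances the partition offset by
 * that segment's size, and stops once the offset reaches the partition size
 * or a segment fails to validate.  seg_fn() is a hypothetical stand-in for
 * hunt_nvram_segment_write_tlv().
 */
#include <stddef.h>

static int
walk_segments_sketch(int (*seg_fn)(size_t offset, size_t *seg_sizep,
    void *arg), void *arg, size_t partn_size)
{
	size_t offset = 0;
	size_t seg_size;
	int rc;

	do {
		if ((rc = seg_fn(offset, &seg_size, arg)) != 0) {
			/* An invalid first segment is a hard error. */
			return ((offset == 0) ? rc : 0);
		}
		offset += seg_size;
	} while (offset < partn_size);

	return (0);
}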
This should actually be dealt with by * hunt_nvram_buf_write_tlv returning ENOSPC. */ if (total_length > partn_size) { rc = ENOSPC; goto fail5; } /* Erase the whole partition in NVRAM */ if ((rc = hunt_nvram_partn_erase(enp, partn, 0, partn_size)) != 0) goto fail6; /* Write new partition contents from the buffer to NVRAM */ if ((rc = hunt_nvram_partn_write(enp, partn, 0, partn_data, total_length)) != 0) goto fail7; /* Unlock the partition */ hunt_nvram_partn_unlock(enp, partn); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); hunt_nvram_partn_unlock(enp, partn); fail3: EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Get the size of a NVRAM partition. This is the total size allocated in nvram, * not the data used by the segments in the partition. */ - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_size( __in efx_nic_t *enp, __in unsigned int partn, __out size_t *sizep) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, sizep, NULL, NULL)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_lock( __in efx_nic_t *enp, __in unsigned int partn) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_read( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { size_t chunk; - int rc; + efx_rc_t rc; while (size > 0) { chunk = MIN(size, HUNTINGTON_NVRAM_CHUNK); if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk)) != 0) { goto fail1; } size -= chunk; data += chunk; offset += chunk; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_erase( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __in size_t size) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_write( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { size_t chunk; - int rc; + efx_rc_t rc; while (size > 0) { chunk = MIN(size, HUNTINGTON_NVRAM_CHUNK); if ((rc = efx_mcdi_nvram_write(enp, partn, offset, data, chunk)) != 0) { goto fail1; } size -= chunk; data += chunk; offset += chunk; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_nvram_partn_unlock( __in efx_nic_t *enp, __in unsigned int partn) { boolean_t reboot; - int rc; + efx_rc_t rc; reboot = B_FALSE; if ((rc = efx_mcdi_nvram_update_finish(enp, partn, reboot)) != 0) goto fail1; return; fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_partn_set_version( __in efx_nic_t *enp, __in unsigned int partn, 
__in_ecount(4) uint16_t version[4]) { struct tlv_partition_version partn_version; size_t size; - int rc; + efx_rc_t rc; /* Add or modify partition version TLV item */ partn_version.version_w = __CPU_TO_LE_16(version[0]); partn_version.version_x = __CPU_TO_LE_16(version[1]); partn_version.version_y = __CPU_TO_LE_16(version[2]); partn_version.version_z = __CPU_TO_LE_16(version[3]); size = sizeof (partn_version) - (2 * sizeof (uint32_t)); /* Write the version number to all segments in the partition */ if ((rc = hunt_nvram_partn_write_segment_tlv(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, TLV_TAG_PARTITION_VERSION(partn), (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM typedef struct hunt_parttbl_entry_s { unsigned int partn; unsigned int port; efx_nvram_type_t nvtype; } hunt_parttbl_entry_t; /* Translate EFX NVRAM types to firmware partition types */ static hunt_parttbl_entry_t hunt_parttbl[] = { {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG}, {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG} }; static __checkReturn hunt_parttbl_entry_t * hunt_parttbl_entry( __in efx_nic_t *enp, __in efx_nvram_type_t type) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); hunt_parttbl_entry_t *entry; int i; EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); for (i = 0; i < EFX_ARRAY_SIZE(hunt_parttbl); i++) { entry = &hunt_parttbl[i]; if (entry->port == emip->emi_port && entry->nvtype == type) return (entry); } return (NULL); } #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_test( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); hunt_parttbl_entry_t *entry; unsigned int npartns = 0; uint32_t *partns = NULL; size_t size; int i; unsigned int j; - int rc; + efx_rc_t rc; /* Find supported partitions */ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t); EFSYS_KMEM_ALLOC(enp->en_esip, size, partns); if (partns == NULL) { rc = ENOMEM; goto fail1; } if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size, &npartns)) != 0) { goto fail2; } /* * Iterate over the list of supported partition types * applicable to *this* port */ for (i = 0; i < EFX_ARRAY_SIZE(hunt_parttbl); i++) { entry = 
&hunt_parttbl[i]; if (entry->port != emip->emi_port) continue; for (j = 0; j < npartns; j++) { if (entry->partn == partns[j]) { rc = efx_mcdi_nvram_test(enp, entry->partn); if (rc != 0) goto fail3; } } } EFSYS_KMEM_FREE(enp->en_esip, size, partns); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, size, partns); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_size( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *sizep) { hunt_parttbl_entry_t *entry; uint32_t partn; - int rc; + efx_rc_t rc; if ((entry = hunt_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } partn = entry->partn; if ((rc = hunt_nvram_partn_size(enp, partn, sizep)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); *sizep = 0; return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_get_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]) { hunt_parttbl_entry_t *entry; uint32_t partn; - int rc; + efx_rc_t rc; if ((entry = hunt_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } partn = entry->partn; /* FIXME: get highest partn version from all ports */ /* FIXME: return partn description if available */ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep, version, NULL, 0)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_rw_start( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *chunk_sizep) { hunt_parttbl_entry_t *entry; uint32_t partn; - int rc; + efx_rc_t rc; if ((entry = hunt_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } partn = entry->partn; if ((rc = hunt_nvram_partn_lock(enp, partn)) != 0) goto fail2; if (chunk_sizep != NULL) *chunk_sizep = HUNTINGTON_NVRAM_CHUNK; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_read_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { hunt_parttbl_entry_t *entry; - int rc; + efx_rc_t rc; if ((entry = hunt_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = hunt_nvram_partn_read(enp, entry->partn, offset, data, size)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_erase( __in efx_nic_t *enp, __in efx_nvram_type_t type) { hunt_parttbl_entry_t *entry; size_t size; - int rc; + efx_rc_t rc; if ((entry = hunt_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = hunt_nvram_partn_size(enp, entry->partn, &size)) != 0) goto fail2; if ((rc = hunt_nvram_partn_erase(enp, entry->partn, 0, size)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_write_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __in_bcount(size) 
caddr_t data, __in size_t size) { hunt_parttbl_entry_t *entry; - int rc; + efx_rc_t rc; if ((entry = hunt_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = hunt_nvram_partn_write(enp, entry->partn, offset, data, size)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_nvram_rw_finish( __in efx_nic_t *enp, __in efx_nvram_type_t type) { hunt_parttbl_entry_t *entry; if ((entry = hunt_parttbl_entry(enp, type)) != NULL) hunt_nvram_partn_unlock(enp, entry->partn); } - __checkReturn int + __checkReturn efx_rc_t hunt_nvram_set_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_ecount(4) uint16_t version[4]) { hunt_parttbl_entry_t *entry; unsigned int partn; - int rc; + efx_rc_t rc; if ((entry = hunt_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } partn = entry->partn; if ((rc = hunt_nvram_partn_set_version(enp, partn, version)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_NVRAM */ #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_phy.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_phy.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_phy.c (revision 293927) @@ -1,701 +1,701 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
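/*
 * Illustrative sketch of the chunked access in hunt_nvram_partn_read() and
 * hunt_nvram_partn_write() above: transfers are split into pieces no larger
 * than the MCDI chunk size, advancing the offset and data pointer together.
 * xfer_fn() and SKETCH_NVRAM_CHUNK are hypothetical stand-ins for
 * efx_mcdi_nvram_read()/efx_mcdi_nvram_write() and HUNTINGTON_NVRAM_CHUNK.
 */
#include <stdint.h>
#include <stddef.h>

#define	SKETCH_NVRAM_CHUNK	128u

static int
nvram_xfer_sketch(int (*xfer_fn)(unsigned int offset, uint8_t *data,
    size_t len), unsigned int offset, uint8_t *data, size_t size)
{
	int rc;

	while (size > 0) {
		size_t chunk = (size < SKETCH_NVRAM_CHUNK) ?
		    size : SKETCH_NVRAM_CHUNK;

		if ((rc = xfer_fn(offset, data, chunk)) != 0)
			return (rc);

		size -= chunk;
		data += chunk;
		offset += chunk;
	}
	return (0);
}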
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON static void hunt_phy_decode_cap( __in uint32_t mcdi_cap, __out uint32_t *maskp) { /* * TBD: consider common Siena/Hunt function: Hunt is a superset of * Siena here (adds 40G) */ uint32_t mask; mask = 0; if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) mask |= (1 << EFX_PHY_CAP_10HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) mask |= (1 << EFX_PHY_CAP_100HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) mask |= (1 << EFX_PHY_CAP_100FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) mask |= (1 << EFX_PHY_CAP_1000HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_1000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_40000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) mask |= (1 << EFX_PHY_CAP_PAUSE); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) mask |= (1 << EFX_PHY_CAP_ASYM); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) mask |= (1 << EFX_PHY_CAP_AN); *maskp = mask; } static void hunt_phy_decode_link_mode( __in efx_nic_t *enp, __in uint32_t link_flags, __in unsigned int speed, __in unsigned int fcntl, __out efx_link_mode_t *link_modep, __out unsigned int *fcntlp) { /* * TBD: consider common Siena/Hunt function: Hunt is a superset of * Siena here (adds 40G and generate-only flow control) */ boolean_t fd = !!(link_flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); boolean_t up = !!(link_flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); _NOTE(ARGUNUSED(enp)) if (!up) *link_modep = EFX_LINK_DOWN; else if (speed == 40000 && fd) *link_modep = EFX_LINK_40000FDX; else if (speed == 10000 && fd) *link_modep = EFX_LINK_10000FDX; else if (speed == 1000) *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX; else if (speed == 100) *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX; else if (speed == 10) *link_modep = fd ? 
EFX_LINK_10FDX : EFX_LINK_10HDX; else *link_modep = EFX_LINK_UNKNOWN; if (fcntl == MC_CMD_FCNTL_OFF) *fcntlp = 0; else if (fcntl == MC_CMD_FCNTL_RESPOND) *fcntlp = EFX_FCNTL_RESPOND; else if (fcntl == MC_CMD_FCNTL_GENERATE) *fcntlp = EFX_FCNTL_GENERATE; else if (fcntl == MC_CMD_FCNTL_BIDIR) *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; else { EFSYS_PROBE1(mc_pcol_error, int, fcntl); *fcntlp = 0; } } void hunt_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep) { /* * TBD: consider common Siena/Hunt function: Hunt is a superset of * Siena here (adds 40G) */ efx_port_t *epp = &(enp->en_port); unsigned int link_flags; unsigned int speed; unsigned int fcntl; efx_link_mode_t link_mode; uint32_t lp_cap_mask; /* * Convert the LINKCHANGE speed enumeration into mbit/s, in the * same way as GET_LINK encodes the speed */ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) { case MCDI_EVENT_LINKCHANGE_SPEED_100M: speed = 100; break; case MCDI_EVENT_LINKCHANGE_SPEED_1G: speed = 1000; break; case MCDI_EVENT_LINKCHANGE_SPEED_10G: speed = 10000; break; case MCDI_EVENT_LINKCHANGE_SPEED_40G: speed = 40000; break; default: speed = 0; break; } link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS); hunt_phy_decode_link_mode(enp, link_flags, speed, MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL), &link_mode, &fcntl); hunt_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP), &lp_cap_mask); /* * It's safe to update ep_lp_cap_mask without the driver's port lock * because presumably any concurrently running efx_port_poll() is * only going to arrive at the same value. * * ep_fcntl has two meanings. It's either the link common fcntl * (if the PHY supports AN), or it's the forced link state. If * the former, it's safe to update the value for the same reason as * for ep_lp_cap_mask. If the latter, then just ignore the value, * because we can race with efx_mac_fcntl_set(). */ epp->ep_lp_cap_mask = lp_cap_mask; epp->ep_fcntl = fcntl; *link_modep = link_mode; } - __checkReturn int + __checkReturn efx_rc_t hunt_phy_power( __in efx_nic_t *enp, __in boolean_t power) { /* TBD: consider common Siena/Hunt function: essentially identical */ - int rc; + efx_rc_t rc; if (!power) return (0); /* Check if the PHY is a zombie */ if ((rc = hunt_phy_verify(enp)) != 0) goto fail1; enp->en_reset_flags |= EFX_RESET_PHY; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_phy_get_link( __in efx_nic_t *enp, __out hunt_link_state_t *hlsp) { /* * TBD: consider common Siena/Hunt function: Hunt is very similar * (at least for now; not clear that the loopbacks should necessarily * be quite the same...) 
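/*
 * Illustrative sketch of hunt_phy_decode_cap() above: each firmware (MCDI)
 * capability bit is tested individually and re-expressed as the matching
 * EFX capability bit, since the two bit numberings need not coincide.  The
 * bit positions below are placeholders, not the real MC_CMD_/EFX_ values.
 */
#include <stdint.h>

#define	SKETCH_MCDI_10000FDX_LBN	5
#define	SKETCH_MCDI_40000FDX_LBN	6
#define	SKETCH_EFX_10000FDX		2
#define	SKETCH_EFX_40000FDX		3

static uint32_t
decode_cap_sketch(uint32_t mcdi_cap)
{
	uint32_t mask = 0;

	if (mcdi_cap & (1u << SKETCH_MCDI_10000FDX_LBN))
		mask |= (1u << SKETCH_EFX_10000FDX);
	if (mcdi_cap & (1u << SKETCH_MCDI_40000FDX_LBN))
		mask |= (1u << SKETCH_EFX_40000FDX);

	return (mask);
}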
*/ efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN, MC_CMD_GET_LINK_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LINK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LINK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) { rc = EMSGSIZE; goto fail2; } hunt_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP), &hlsp->hls_adv_cap_mask); hunt_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP), &hlsp->hls_lp_cap_mask); hunt_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS), MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED), MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL), &hlsp->hls_link_mode, &hlsp->hls_fcntl); #if EFSYS_OPT_LOOPBACK /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); hlsp->hls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE); #endif /* EFSYS_OPT_LOOPBACK */ hlsp->hls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_phy_reconfigure( __in efx_nic_t *enp) { /* * TBD: this is a little different for now (no LED support for Hunt * yet), but ultimately should consider common Siena/Hunt function: * Hunt should be a superset of Siena here (adds 40G) */ efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_port_t *epp = &(enp->en_port); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN, MC_CMD_SET_LINK_OUT_LEN)]; uint32_t cap_mask; unsigned int led_mode; unsigned int speed; - int rc; + efx_rc_t rc; if (~encp->enc_func_flags & EFX_NIC_FUNC_LINKCTRL) goto out; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_LINK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_LINK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN; cap_mask = epp->ep_adv_cap_mask; MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP, PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1, PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1, PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1, PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1, 
PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1, PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1, PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1, PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1, PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1, PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1); /* Too many fields for for POPULATE macros, so insert this afterwards */ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1); #if EFSYS_OPT_LOOPBACK MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, epp->ep_loopback_type); switch (epp->ep_loopback_link_mode) { case EFX_LINK_100FDX: speed = 100; break; case EFX_LINK_1000FDX: speed = 1000; break; case EFX_LINK_10000FDX: speed = 10000; break; case EFX_LINK_40000FDX: speed = 40000; break; default: speed = 0; } #else MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE); speed = 0; #endif /* EFSYS_OPT_LOOPBACK */ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed); #if EFSYS_OPT_PHY_FLAGS MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags); #else MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0); #endif /* EFSYS_OPT_PHY_FLAGS */ efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } /* And set the blink mode */ (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_ID_LED; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN; #if EFSYS_OPT_PHY_LED_CONTROL switch (epp->ep_phy_led_mode) { case EFX_PHY_LED_DEFAULT: led_mode = MC_CMD_LED_DEFAULT; break; case EFX_PHY_LED_OFF: led_mode = MC_CMD_LED_OFF; break; case EFX_PHY_LED_ON: led_mode = MC_CMD_LED_ON; break; default: EFSYS_ASSERT(0); led_mode = MC_CMD_LED_DEFAULT; } MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode); #else MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } out: return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_phy_verify( __in efx_nic_t *enp) { /* TBD: consider common Siena/Hunt function: essentially identical */ efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN, MC_CMD_GET_PHY_STATE_OUT_LEN)]; uint32_t state; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PHY_STATE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) { rc = EMSGSIZE; goto fail2; } state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE); if (state != MC_CMD_PHY_STATE_OK) { if (state != MC_CMD_PHY_STATE_ZOMBIE) EFSYS_PROBE1(mc_pcol_error, int, state); rc = ENOTACTIVE; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip) { _NOTE(ARGUNUSED(enp, ouip)) return (ENOTSUP); } #if EFSYS_OPT_PHY_STATS - __checkReturn int + __checkReturn efx_rc_t hunt_phy_stats_update( __in efx_nic_t *enp, __in 
efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat) { /* TBD: no stats support in firmware yet */ _NOTE(ARGUNUSED(enp, esmp)) memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat)); return (0); } #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES extern const char * hunt_phy_prop_name( __in efx_nic_t *enp, __in unsigned int id) { _NOTE(ARGUNUSED(enp, id)) return (NULL); } #endif /* EFSYS_OPT_NAMES */ -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_prop_get( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t flags, __out uint32_t *valp) { _NOTE(ARGUNUSED(enp, id, flags, valp)) return (ENOTSUP); } -extern __checkReturn int +extern __checkReturn efx_rc_t hunt_phy_prop_set( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t val) { _NOTE(ARGUNUSED(enp, id, val)) return (ENOTSUP); } #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST - __checkReturn int + __checkReturn efx_rc_t hunt_bist_enable_offline( __in efx_nic_t *enp) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_bist_start(enp, type)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN, MCDI_CTL_SDU_LEN_MAX)]; uint32_t value_mask = 0; uint32_t result; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_POLL_BIST; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MCDI_CTL_SDU_LEN_MAX; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) { rc = EMSGSIZE; goto fail2; } if (count > 0) (void) memset(valuesp, '\0', count * sizeof (unsigned long)); result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT); if (result == MC_CMD_POLL_BIST_FAILED && req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MEM_LEN && count > EFX_BIST_MEM_ECC_FATAL) { if (valuesp != NULL) { valuesp[EFX_BIST_MEM_TEST] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_TEST); valuesp[EFX_BIST_MEM_ADDR] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ADDR); valuesp[EFX_BIST_MEM_BUS] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_BUS); valuesp[EFX_BIST_MEM_EXPECT] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_EXPECT); valuesp[EFX_BIST_MEM_ACTUAL] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ACTUAL); valuesp[EFX_BIST_MEM_ECC] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC); valuesp[EFX_BIST_MEM_ECC_PARITY] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_PARITY); valuesp[EFX_BIST_MEM_ECC_FATAL] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_FATAL); } value_mask |= (1 << EFX_BIST_MEM_TEST) | (1 << EFX_BIST_MEM_ADDR) | (1 << EFX_BIST_MEM_BUS) | (1 << EFX_BIST_MEM_EXPECT) | (1 << EFX_BIST_MEM_ACTUAL) | (1 << EFX_BIST_MEM_ECC) | (1 << EFX_BIST_MEM_ECC_PARITY) | (1 << EFX_BIST_MEM_ECC_FATAL); } else if 
(result == MC_CMD_POLL_BIST_FAILED && encp->enc_phy_type == EFX_PHY_XFI_FARMI && req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN && count > EFX_BIST_FAULT_CODE) { if (valuesp != NULL) valuesp[EFX_BIST_FAULT_CODE] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST); value_mask |= 1 << EFX_BIST_FAULT_CODE; } if (value_maskp != NULL) *value_maskp = value_mask; EFSYS_ASSERT(resultp != NULL); if (result == MC_CMD_POLL_BIST_RUNNING) *resultp = EFX_BIST_RESULT_RUNNING; else if (result == MC_CMD_POLL_BIST_PASSED) *resultp = EFX_BIST_RESULT_PASSED; else *resultp = EFX_BIST_RESULT_FAILED; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type) { /* There is no way to stop BIST on Huntinton. */ _NOTE(ARGUNUSED(enp, type)) } #endif /* EFSYS_OPT_BIST */ #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_rx.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_rx.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_rx.c (revision 293927) @@ -1,765 +1,765 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
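/*
 * Illustrative sketch of the MCDI request pattern used repeatedly above
 * (hunt_phy_get_link(), hunt_phy_verify(), hunt_bist_poll()): one payload
 * buffer sized for the larger of request and response, the command issued
 * synchronously, and both the command's error code and the response length
 * checked before any output field is trusted.  The struct and execute()
 * callback are hypothetical stand-ins for efx_mcdi_req_t and
 * efx_mcdi_execute().
 */
#include <stdint.h>
#include <string.h>
#include <errno.h>

#define	SKETCH_IN_LEN	8u
#define	SKETCH_OUT_LEN	16u
#define	SKETCH_MAX(a, b)	((a) > (b) ? (a) : (b))

struct sketch_req {
	unsigned int	cmd;
	uint8_t		*buf;
	size_t		in_length;
	size_t		out_length;
	size_t		out_length_used;	/* filled in by execute() */
	int		rc;			/* filled in by execute() */
};

static int
mcdi_call_sketch(void (*execute)(struct sketch_req *), unsigned int cmd)
{
	uint8_t payload[SKETCH_MAX(SKETCH_IN_LEN, SKETCH_OUT_LEN)];
	struct sketch_req req;

	(void) memset(payload, 0, sizeof (payload));
	(void) memset(&req, 0, sizeof (req));
	req.cmd = cmd;
	req.buf = payload;
	req.in_length = SKETCH_IN_LEN;
	req.out_length = SKETCH_OUT_LEN;

	execute(&req);

	if (req.rc != 0)
		return (req.rc);		/* firmware reported an error */
	if (req.out_length_used < SKETCH_OUT_LEN)
		return (EMSGSIZE);		/* response too short to parse */

	return (0);
}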
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_init_rxq( __in efx_nic_t *enp, __in uint32_t size, __in uint32_t target_evq, __in uint32_t label, __in uint32_t instance, __in efsys_mem_t *esmp) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_RXQ_IN_LEN(EFX_RXQ_NBUFS(EFX_RXQ_MAXNDESCS)), MC_CMD_INIT_RXQ_OUT_LEN)]; int npages = EFX_RXQ_NBUFS(size); int i; efx_qword_t *dma_addr; uint64_t addr; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_RXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_RXQ_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_RXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_SIZE, size); MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_TARGET_EVQ, target_evq); MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_LABEL, label); MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_INSTANCE, instance); MCDI_IN_POPULATE_DWORD_5(req, INIT_RXQ_IN_FLAGS, INIT_RXQ_IN_FLAG_BUFF_MODE, 0, INIT_RXQ_IN_FLAG_HDR_SPLIT, 0, INIT_RXQ_IN_FLAG_TIMESTAMP, 0, INIT_RXQ_IN_CRC_MODE, 0, INIT_RXQ_IN_FLAG_PREFIX, 1); MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_OWNER_ID, 0); MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_fini_rxq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN, MC_CMD_FINI_RXQ_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_RXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance); efx_mcdi_execute(enp, &req); if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_RX_SCALE -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_rss_context_alloc( __in efx_nic_t *enp, __out uint32_t *rss_contextp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)]; uint32_t rss_context; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE); /* NUM_QUEUES is only used to validate indirection table offsets */ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, 64); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < 
MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) { rc = EMSGSIZE; goto fail2; } rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); if (rss_context == HUNTINGTON_RSS_CONTEXT_INVALID) { rc = ENOENT; goto fail3; } *rss_contextp = rss_context; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE -static int +static efx_rc_t efx_mcdi_rss_context_free( __in efx_nic_t *enp, __in uint32_t rss_context) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN, MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)]; - int rc; + efx_rc_t rc; if (rss_context == HUNTINGTON_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE -static int +static efx_rc_t efx_mcdi_rss_context_set_flags( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_type_t type) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN, MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)]; - int rc; + efx_rc_t rc; if (rss_context == HUNTINGTON_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, rss_context); MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, (type & (1U << EFX_RX_HASH_IPV4)) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, (type & (1U << EFX_RX_HASH_TCPIPV4)) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, (type & (1U << EFX_RX_HASH_IPV6)) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, (type & (1U << EFX_RX_HASH_TCPIPV6)) ? 
1 : 0); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE -static int +static efx_rc_t efx_mcdi_rss_context_set_key( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN, MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)]; - int rc; + efx_rc_t rc; if (rss_context == HUNTINGTON_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, rss_context); EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) { rc = EINVAL; goto fail2; } memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY), key, n); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE -static int +static efx_rc_t efx_mcdi_rss_context_set_table( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN, MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)]; uint8_t *req_table; int i, rc; if (rss_context == HUNTINGTON_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, rss_context); req_table = MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE); for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++) { req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ - __checkReturn int + __checkReturn efx_rc_t hunt_rx_init( __in efx_nic_t *enp) { #if EFSYS_OPT_RX_SCALE if (efx_mcdi_rss_context_alloc(enp, &enp->en_rss_context) == 0) { /* * Allocated an exclusive RSS context, which allows both the * indirection table and key to be modified. */ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE; enp->en_hash_support = EFX_RX_HASH_AVAILABLE; } else { /* * Failed to allocate an exclusive RSS context. Continue * operation without support for RSS. The pseudo-header in * received packets will not contain a Toeplitz hash value. 
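/*
 * Illustrative sketch of the indirection-table fill in
 * efx_mcdi_rss_context_set_table() above: a caller-supplied queue mapping of
 * any length is replicated modulo its size across the fixed-size table sent
 * to firmware, or zeroed if no mapping is given.  SKETCH_TABLE_LEN is a
 * placeholder for the MCDI table length.
 */
#include <stdint.h>
#include <stddef.h>

#define	SKETCH_TABLE_LEN	128u

static void
fill_rss_table_sketch(uint8_t *req_table, const unsigned int *table, size_t n)
{
	size_t i;

	for (i = 0; i < SKETCH_TABLE_LEN; i++)
		req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
}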
*/ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE; enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE; } #endif /* EFSYS_OPT_RX_SCALE */ return (0); } #if EFSYS_OPT_RX_HDR_SPLIT - __checkReturn int + __checkReturn efx_rc_t hunt_rx_hdr_split_enable( __in efx_nic_t *enp, __in unsigned int hdr_buf_size, __in unsigned int pld_buf_size) { - int rc; + efx_rc_t rc; /* FIXME */ _NOTE(ARGUNUSED(enp, hdr_buf_size, pld_buf_size)) if (B_FALSE) { rc = ENOTSUP; goto fail1; } /* FIXME */ return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_HDR_SPLIT */ #if EFSYS_OPT_RX_SCATTER - __checkReturn int + __checkReturn efx_rc_t hunt_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { _NOTE(ARGUNUSED(enp, buf_size)) return (0); } #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE - __checkReturn int + __checkReturn efx_rc_t hunt_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ); EFSYS_ASSERT3U(insert, ==, B_TRUE); if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) { rc = EINVAL; goto fail1; } if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail2; } if ((rc = efx_mcdi_rss_context_set_flags(enp, enp->en_rss_context, type)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE - __checkReturn int + __checkReturn efx_rc_t hunt_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n) { - int rc; + efx_rc_t rc; if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail1; } if ((rc = efx_mcdi_rss_context_set_key(enp, enp->en_rss_context, key, n)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE - __checkReturn int + __checkReturn efx_rc_t hunt_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n) { - int rc; + efx_rc_t rc; if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail1; } if ((rc = efx_mcdi_rss_context_set_table(enp, enp->en_rss_context, table, n)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ void hunt_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added) { efx_qword_t qword; unsigned int i; unsigned int offset; unsigned int id; /* The client driver must not overfill the queue */ EFSYS_ASSERT3U(added - completed + n, <=, EFX_RXQ_LIMIT(erp->er_mask + 1)); id = added & (erp->er_mask); for (i = 0; i < n; i++) { EFSYS_PROBE4(rx_post, unsigned int, erp->er_index, unsigned int, id, efsys_dma_addr_t, addrp[i], size_t, size); EFX_POPULATE_QWORD_3(qword, ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size), ESF_DZ_RX_KER_BUF_ADDR_DW0, (uint32_t)(addrp[i] & 0xffffffff), ESF_DZ_RX_KER_BUF_ADDR_DW1, (uint32_t)(addrp[i] >> 32)); offset = id * sizeof (efx_qword_t); EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword); id = (id + 1) & (erp->er_mask); } } void hunt_rx_qpush( __in efx_rxq_t *erp, __in 
unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; unsigned int pushed = *pushedp; uint32_t wptr; efx_dword_t dword; /* Hardware has alignment restriction for WPTR */ wptr = P2ALIGN(added, HUNTINGTON_RX_WPTR_ALIGN); if (pushed == wptr) return; *pushedp = wptr; /* Push the populated descriptors out */ wptr &= erp->er_mask; EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, wptr, pushed & erp->er_mask); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, erp->er_index, &dword, B_FALSE); } - __checkReturn int + __checkReturn efx_rc_t hunt_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_rx_qenable( __in efx_rxq_t *erp) { /* FIXME */ _NOTE(ARGUNUSED(erp)) /* FIXME */ } - __checkReturn int + __checkReturn efx_rc_t hunt_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - int rc; + efx_rc_t rc; _NOTE(ARGUNUSED(erp)) EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH)); EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS); EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS)); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS)); if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_rxq_limit) { rc = EINVAL; goto fail2; } /* * FIXME: Siena code handles different queue types (default, header * split, scatter); we'll need to do something more here later, but * all that stuff is TBD for now. */ if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index, esmp)) != 0) goto fail3; erp->er_eep = eep; erp->er_label = label; hunt_ev_rxlabel_init(eep, erp, label); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_rx_qdestroy( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_evq_t *eep = erp->er_eep; unsigned int label = erp->er_label; hunt_ev_rxlabel_fini(eep, label); EFSYS_ASSERT(enp->en_rx_qcount != 0); --enp->en_rx_qcount; EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); } void hunt_rx_fini( __in efx_nic_t *enp) { #if EFSYS_OPT_RX_SCALE if (enp->en_rss_support != EFX_RX_SCALE_UNAVAILABLE) { (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context); } enp->en_rss_context = 0; enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE; #else _NOTE(ARGUNUSED(enp)) #endif /* EFSYS_OPT_RX_SCALE */ } #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_sram.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_sram.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_sram.c (revision 293927) @@ -1,69 +1,69 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t hunt_sram_test( __in efx_nic_t *enp, __in efx_sram_pattern_fn_t func) { - int rc; + efx_rc_t rc; /* FIXME */ _NOTE(ARGUNUSED(enp)) _NOTE(ARGUNUSED(func)) if (B_FALSE) { rc = ENOTSUP; goto fail1; } /* FIXME */ return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_tx.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_tx.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_tx.c (revision 293927) @@ -1,679 +1,679 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON #if EFSYS_OPT_QSTATS #define EFX_TX_QSTAT_INCR(_etp, _stat) \ do { \ (_etp)->et_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_TX_QSTAT_INCR(_etp, _stat) #endif -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_init_txq( __in efx_nic_t *enp, __in uint32_t size, __in uint32_t target_evq, __in uint32_t label, __in uint32_t instance, __in uint16_t flags, __in efsys_mem_t *esmp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS), MC_CMD_INIT_TXQ_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; - int rc; + efx_rc_t rc; EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >= EFX_TXQ_NBUFS(EFX_TXQ_MAXNDESCS(&enp->en_nic_cfg))); npages = EFX_TXQ_NBUFS(size); if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_TXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); MCDI_IN_POPULATE_DWORD_6(req, INIT_TXQ_IN_FLAGS, INIT_TXQ_IN_FLAG_BUFF_MODE, 0, INIT_TXQ_IN_FLAG_IP_CSUM_DIS, (flags & EFX_CKSUM_IPV4) ? 0 : 1, INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, (flags & EFX_CKSUM_TCPUDP) ? 
0 : 1, INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, INIT_TXQ_IN_CRC_MODE, 0, INIT_TXQ_IN_FLAG_TIMESTAMP, 0); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_fini_txq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN, MC_CMD_FINI_TXQ_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_TXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance); efx_mcdi_execute(enp, &req); if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) { rc = req.emr_rc; goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_init( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) return (0); } void hunt_tx_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp) { efx_qword_t desc; - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags, esmp)) != 0) goto fail1; /* * A previous user of this TX queue may have written a descriptor to the * TX push collector, but not pushed the doorbell (e.g. after a crash). * The next doorbell write would then push the stale descriptor. * * Ensure the (per network port) TX push collector is cleared by writing * a no-op TX option descriptor. See bug29981 for details. */ *addedp = 1; EFX_POPULATE_QWORD_4(desc, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, ESF_DZ_TX_OPTION_UDP_TCP_CSUM, (flags & EFX_CKSUM_TCPUDP) ? 1 : 0, ESF_DZ_TX_OPTION_IP_CSUM, (flags & EFX_CKSUM_IPV4) ? 
1 : 0); EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc); hunt_tx_qpush(etp, *addedp, 0); return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_tx_qdestroy( __in efx_txq_t *etp) { /* FIXME */ _NOTE(ARGUNUSED(etp)) /* FIXME */ } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_qpio_enable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_piobuf_handle_t handle; - int rc; + efx_rc_t rc; if (etp->et_pio_size != 0) { rc = EALREADY; goto fail1; } /* Sub-allocate a PIO block from a piobuf */ if ((rc = hunt_nic_pio_alloc(enp, &etp->et_pio_bufnum, &handle, &etp->et_pio_blknum, &etp->et_pio_offset, &etp->et_pio_size)) != 0) { goto fail2; } EFSYS_ASSERT3U(etp->et_pio_size, !=, 0); /* Link the piobuf to this TXQ */ if ((rc = hunt_nic_pio_link(enp, etp->et_index, handle)) != 0) { goto fail3; } /* * et_pio_offset is the offset of the sub-allocated block within the * hardware PIO buffer. It is used as the buffer address in the PIO * option descriptor. * * et_pio_write_offset is the offset of the sub-allocated block from the * start of the write-combined memory mapping, and is used for writing * data into the PIO buffer. */ etp->et_pio_write_offset = (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) + ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset; return (0); fail3: EFSYS_PROBE(fail3); hunt_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); etp->et_pio_size = 0; fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_tx_qpio_disable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; if (etp->et_pio_size != 0) { /* Unlink the piobuf from this TXQ */ hunt_nic_pio_unlink(enp, etp->et_index); /* Free the sub-allocated PIO block */ hunt_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); etp->et_pio_size = 0; etp->et_pio_write_offset = 0; } } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(length) uint8_t *buffer, __in size_t length, __in size_t offset) { efx_nic_t *enp = etp->et_enp; efsys_bar_t *esbp = enp->en_esbp; uint32_t write_offset; uint32_t write_offset_limit; efx_qword_t *eqp; - int rc; + efx_rc_t rc; EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0); if (etp->et_pio_size == 0) { rc = ENOENT; goto fail1; } if (offset + length > etp->et_pio_size) { rc = ENOSPC; goto fail2; } /* * Writes to PIO buffers must be 64 bit aligned, and multiples of * 64 bits. 
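hunt_tx_qpio_write() asserts that the length is a whole number of efx_qword_t units, so a caller must pad sub-qword packets before copying them into the PIO block. A hedged caller-side sketch, assuming the sfxge common headers; example_pio_copy(), the 256-byte bounce buffer and the pkt/pkt_len arguments are illustrative, not from the source:

static	__checkReturn	efx_rc_t
example_pio_copy(
	__in		efx_txq_t *etp,
	__in		const uint8_t *pkt,
	__in		size_t pkt_len)
{
	uint8_t buf[256];		/* illustrative bounce buffer */
	size_t padded = P2ROUNDUP(pkt_len, sizeof (efx_qword_t));
	efx_rc_t rc;

	if (padded > sizeof (buf)) {
		rc = ENOSPC;
		goto fail1;
	}

	memcpy(buf, pkt, pkt_len);
	memset(buf + pkt_len, 0, padded - pkt_len);	/* pad to a 64-bit multiple */

	/* Offset 0 is the start of this queue's sub-allocated PIO block */
	if ((rc = hunt_tx_qpio_write(etp, buf, padded, 0)) != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}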
*/ write_offset = etp->et_pio_write_offset + offset; write_offset_limit = write_offset + length; eqp = (efx_qword_t *)buffer; while (write_offset < write_offset_limit) { EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp); eqp++; write_offset += sizeof (efx_qword_t); } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp) { efx_qword_t pio_desc; unsigned int id; size_t offset; unsigned int added = *addedp; - int rc; + efx_rc_t rc; if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } if (etp->et_pio_size == 0) { rc = ENOENT; goto fail2; } id = added++ & etp->et_mask; offset = id * sizeof (efx_qword_t); EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index, unsigned int, id, uint32_t, etp->et_pio_offset, size_t, pkt_length); EFX_POPULATE_QWORD_5(pio_desc, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, 1, ESF_DZ_TX_PIO_CONT, 0, ESF_DZ_TX_PIO_BYTE_CNT, pkt_length, ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc); EFX_TX_QSTAT_INCR(etp, TX_POST_PIO); *addedp = added; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; - int rc; + efx_rc_t rc; if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } for (i = 0; i < n; i++) { efx_buffer_t *ebp = &eb[i]; efsys_dma_addr_t addr = ebp->eb_addr; size_t size = ebp->eb_size; boolean_t eop = ebp->eb_eop; unsigned int id; size_t offset; efx_qword_t qword; /* Fragments must not span 4k boundaries. */ EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size)); id = added++ & etp->et_mask; offset = id * sizeof (efx_qword_t); EFSYS_PROBE5(tx_post, unsigned int, etp->et_index, unsigned int, id, efsys_dma_addr_t, addr, size_t, size, boolean_t, eop); EFX_POPULATE_QWORD_5(qword, ESF_DZ_TX_KER_TYPE, 0, ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1, ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size), ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword); } EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * This improves performance by pushing a TX descriptor at the same time as the * doorbell. The descriptor must be added to the TXQ, so that can be used if the * hardware decides not to use the pushed descriptor. 
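The comment above explains why the doorbell write carries a copy of a descriptor that already sits in the ring. A hedged usage sketch of the resulting post-then-push pairing, built only from the hunt_tx_qdesc_dma_create()/hunt_tx_qdesc_post()/hunt_tx_qpush() signatures in this file; example_tx_post_and_push() and its single-fragment assumption are illustrative:

static	__checkReturn	efx_rc_t
example_tx_post_and_push(
	__in		efx_txq_t *etp,
	__in		efsys_dma_addr_t frag_addr,
	__in		size_t frag_len,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	efx_desc_t desc;
	unsigned int pushed = *addedp;	/* index of the first new descriptor */
	efx_rc_t rc;

	/* Build a one-fragment DMA descriptor with EOP set */
	hunt_tx_qdesc_dma_create(etp, frag_addr, frag_len, B_TRUE, &desc);

	/* Write it into the ring ... */
	if ((rc = hunt_tx_qdesc_post(etp, &desc, 1, completed, addedp)) != 0)
		goto fail1;

	/* ... then ring the doorbell, pushing the descriptor at 'pushed' */
	hunt_tx_qpush(etp, *addedp, pushed);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}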
*/ void hunt_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed) { efx_nic_t *enp = etp->et_enp; unsigned int wptr; unsigned int id; size_t offset; efx_qword_t desc; efx_oword_t oword; wptr = added & etp->et_mask; id = pushed & etp->et_mask; offset = id * sizeof (efx_qword_t); EFSYS_MEM_READQ(etp->et_esmp, offset, &desc); EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr, ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1), ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0)); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index, &oword); } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; - int rc; + efx_rc_t rc; if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } for (i = 0; i < n; i++) { efx_desc_t *edp = &ed[i]; unsigned int id; size_t offset; id = added++ & etp->et_mask; offset = id * sizeof (efx_desc_t); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq); } EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index, unsigned int, added, unsigned int, n); EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp) { /* Fragments must not span 4k boundaries. */ EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size); EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, efsys_dma_addr_t, addr, size_t, size, boolean_t, eop); EFX_POPULATE_QWORD_5(edp->ed_eq, ESF_DZ_TX_KER_TYPE, 0, ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1, ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size), ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); } void hunt_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp) { EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index, uint16_t, ipv4_id, uint32_t, tcp_seq, uint8_t, tcp_flags); EFX_POPULATE_QWORD_5(edp->ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, ESF_DZ_TX_TSO_IP_ID, ipv4_id, ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); } void hunt_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t tci, __out efx_desc_t *edp) { EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index, uint16_t, tci); EFX_POPULATE_QWORD_4(edp->ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_VLAN, ESF_DZ_TX_VLAN_OP, tci ? 
1 : 0, ESF_DZ_TX_VLAN_TAG1, tci); } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns) { - int rc; + efx_rc_t rc; /* FIXME */ _NOTE(ARGUNUSED(etp, ns)) if (B_FALSE) { rc = ENOTSUP; goto fail1; } /* FIXME */ return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_tx_qflush( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_tx_qenable( __in efx_txq_t *etp) { /* FIXME */ _NOTE(ARGUNUSED(etp)) /* FIXME */ } #if EFSYS_OPT_QSTATS void hunt_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) { /* * TBD: Consider a common Siena/Huntington function. The code is * essentially identical. */ unsigned int id; for (id = 0; id < TX_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, etp->et_stat[id]); etp->et_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ #endif /* EFSYS_OPT_HUNTINGTON */ Index: stable/10/sys/dev/sfxge/common/hunt_vpd.c =================================================================== --- stable/10/sys/dev/sfxge/common/hunt_vpd.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/hunt_vpd.c (revision 293927) @@ -1,435 +1,435 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_VPD #if EFSYS_OPT_HUNTINGTON #include "ef10_tlv_layout.h" - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_init( __in efx_nic_t *enp) { caddr_t svpd; size_t svpd_size; uint32_t pci_pf; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); pci_pf = enp->en_nic_cfg.enc_pf; /* * The VPD interface exposes VPD resources from the combined static and * dynamic VPD storage. As the static VPD configuration should *never* * change, we can cache it. */ svpd = NULL; svpd_size = 0; rc = hunt_nvram_partn_read_tlv(enp, NVRAM_PARTITION_TYPE_STATIC_CONFIG, TLV_TAG_PF_STATIC_VPD(pci_pf), &svpd, &svpd_size); if (rc != 0) { if (rc == EACCES) { /* Unpriviledged functions cannot access VPD */ goto out; } goto fail1; } if (svpd != NULL && svpd_size > 0) { if ((rc = efx_vpd_hunk_verify(svpd, svpd_size, NULL)) != 0) goto fail2; } enp->en_u.hunt.enu_svpd = svpd; enp->en_u.hunt.enu_svpd_length = svpd_size; out: return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, svpd_size, svpd); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_size( __in efx_nic_t *enp, __out size_t *sizep) { - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); /* * This function returns the total size the user should allocate * for all VPD operations. We've already cached the static vpd, * so we just need to return an upper bound on the dynamic vpd, * which is the size of the DYNAMIC_CONFIG partition. */ if ((rc = efx_mcdi_nvram_info(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, sizep, NULL, NULL)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size) { caddr_t dvpd; size_t dvpd_size; uint32_t pci_pf; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); pci_pf = enp->en_nic_cfg.enc_pf; if ((rc = hunt_nvram_partn_read_tlv(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, TLV_TAG_PF_DYNAMIC_VPD(pci_pf), &dvpd, &dvpd_size)) != 0) goto fail1; if (dvpd_size > size) { rc = ENOSPC; goto fail2; } memcpy(data, dvpd, dvpd_size); /* Pad data with all-1s, consistent with update operations */ memset(data + dvpd_size, 0xff, size - dvpd_size); EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { efx_vpd_tag_t stag; efx_vpd_tag_t dtag; efx_vpd_keyword_t skey; efx_vpd_keyword_t dkey; unsigned int scont; unsigned int dcont; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); /* * Strictly you could take the view that dynamic vpd is optional. * Instead, to conform more closely to the read/verify/reinit() * paradigm, we require dynamic vpd. hunt_vpd_reinit() will * reinitialize it as required. */ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0) goto fail1; /* * Verify that there is no duplication between the static and * dynamic cfg sectors. 
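hunt_vpd_init() caches the static VPD, hunt_vpd_read() pads the caller's buffer with 0xff beyond the dynamic image, and hunt_vpd_verify() checks the combined result. A hedged sketch of that size/read/verify sequence; example_vpd_fetch() is illustrative and assumes EFSYS_KMEM_ALLOC()/EFSYS_KMEM_FREE() behave as used elsewhere in this file:

static	__checkReturn	efx_rc_t
example_vpd_fetch(
	__in		efx_nic_t *enp)
{
	caddr_t data;
	size_t size;
	efx_rc_t rc;

	/* Upper bound: dynamic partition size (static VPD is cached) */
	if ((rc = hunt_vpd_size(enp, &size)) != 0)
		goto fail1;

	EFSYS_KMEM_ALLOC(enp->en_esip, size, data);
	if (data == NULL) {
		rc = ENOMEM;
		goto fail2;
	}

	/* Dynamic VPD is copied in; the remainder is padded with 0xff */
	if ((rc = hunt_vpd_read(enp, data, size)) != 0)
		goto fail3;

	/* Well-formedness plus static/dynamic (tag, keyword) overlap check */
	if ((rc = hunt_vpd_verify(enp, data, size)) != 0)
		goto fail4;

	EFSYS_KMEM_FREE(enp->en_esip, size, data);
	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
	EFSYS_KMEM_FREE(enp->en_esip, size, data);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}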
*/ if (enp->en_u.hunt.enu_svpd_length == 0) goto done; dcont = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_hunk_next(data, size, &dtag, &dkey, NULL, NULL, &dcont)) != 0) goto fail2; if (dcont == 0) break; scont = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_hunk_next( enp->en_u.hunt.enu_svpd, enp->en_u.hunt.enu_svpd_length, &stag, &skey, NULL, NULL, &scont)) != 0) goto fail3; if (scont == 0) break; if (stag == dtag && skey == dkey) { rc = EEXIST; goto fail4; } } } done: return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { boolean_t wantpid; - int rc; + efx_rc_t rc; /* * Only create an ID string if the dynamic cfg doesn't have one */ if (enp->en_u.hunt.enu_svpd_length == 0) wantpid = B_TRUE; else { unsigned int offset; uint8_t length; rc = efx_vpd_hunk_get(enp->en_u.hunt.enu_svpd, enp->en_u.hunt.enu_svpd_length, EFX_VPD_ID, 0, &offset, &length); if (rc == 0) wantpid = B_FALSE; else if (rc == ENOENT) wantpid = B_TRUE; else goto fail1; } if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp) { unsigned int offset; uint8_t length; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); /* Attempt to satisfy the request from svpd first */ if (enp->en_u.hunt.enu_svpd_length > 0) { if ((rc = efx_vpd_hunk_get(enp->en_u.hunt.enu_svpd, enp->en_u.hunt.enu_svpd_length, evvp->evv_tag, evvp->evv_keyword, &offset, &length)) == 0) { evvp->evv_length = length; memcpy(evvp->evv_value, enp->en_u.hunt.enu_svpd + offset, length); return (0); } else if (rc != ENOENT) goto fail1; } /* And then from the provided data buffer */ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag, evvp->evv_keyword, &offset, &length)) != 0) goto fail2; evvp->evv_length = length; memcpy(evvp->evv_value, data + offset, length); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_set( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp) { - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); /* If the provided (tag,keyword) exists in svpd, then it is readonly */ if (enp->en_u.hunt.enu_svpd_length > 0) { unsigned int offset; uint8_t length; if ((rc = efx_vpd_hunk_get(enp->en_u.hunt.enu_svpd, enp->en_u.hunt.enu_svpd_length, evvp->evv_tag, evvp->evv_keyword, &offset, &length)) == 0) { rc = EACCES; goto fail1; } } if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_next( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp) { _NOTE(ARGUNUSED(enp, data, size, evvp, contp)) return (ENOTSUP); } - __checkReturn int + __checkReturn efx_rc_t hunt_vpd_write( __in 
efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { size_t vpd_length; uint32_t pci_pf; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); pci_pf = enp->en_nic_cfg.enc_pf; /* Determine total length of new dynamic VPD */ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0) goto fail1; /* Store new dynamic VPD in all segments in DYNAMIC_CONFIG partition */ if ((rc = hunt_nvram_partn_write_segment_tlv(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, TLV_TAG_PF_DYNAMIC_VPD(pci_pf), data, vpd_length, B_TRUE)) != 0) { goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void hunt_vpd_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); if (enp->en_u.hunt.enu_svpd_length > 0) { EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.hunt.enu_svpd_length, enp->en_u.hunt.enu_svpd); enp->en_u.hunt.enu_svpd = NULL; enp->en_u.hunt.enu_svpd_length = 0; } } #endif /* EFSYS_OPT_HUNTINGTON */ #endif /* EFSYS_OPT_VPD */ Index: stable/10/sys/dev/sfxge/common/mcdi_mon.c =================================================================== --- stable/10/sys/dev/sfxge/common/mcdi_mon.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/mcdi_mon.c (revision 293927) @@ -1,552 +1,552 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_MCDI #if EFSYS_OPT_MON_STATS #define MCDI_MON_NEXT_PAGE (uint16_t)0xfffe #define MCDI_MON_INVALID_SENSOR (uint16_t)0xfffd #define MCDI_MON_PAGE_SIZE 0x20 /* Bitmasks of valid port(s) for each sensor */ #define MCDI_MON_PORT_NONE (0x00) #define MCDI_MON_PORT_P1 (0x01) #define MCDI_MON_PORT_P2 (0x02) #define MCDI_MON_PORT_P3 (0x04) #define MCDI_MON_PORT_P4 (0x08) #define MCDI_MON_PORT_Px (0xFFFF) /* Entry for MCDI sensor in sensor map */ #define STAT(portmask, stat) \ { (MCDI_MON_PORT_##portmask), (EFX_MON_STAT_##stat) } /* Entry for sensor next page flag in sensor map */ #define STAT_NEXT_PAGE() \ { MCDI_MON_PORT_NONE, MCDI_MON_NEXT_PAGE } /* Placeholder for gaps in the array */ #define STAT_NO_SENSOR() \ { MCDI_MON_PORT_NONE, MCDI_MON_INVALID_SENSOR } /* Map from MC sensors to monitor statistics */ static const struct mcdi_sensor_map_s { uint16_t msm_port_mask; uint16_t msm_stat; } mcdi_sensor_map[] = { /* Sensor page 0 MC_CMD_SENSOR_xxx */ STAT(Px, INT_TEMP), /* 0x00 CONTROLLER_TEMP */ STAT(Px, EXT_TEMP), /* 0x01 PHY_COMMON_TEMP */ STAT(Px, INT_COOLING), /* 0x02 CONTROLLER_COOLING */ STAT(P1, EXT_TEMP), /* 0x03 PHY0_TEMP */ STAT(P1, EXT_COOLING), /* 0x04 PHY0_COOLING */ STAT(P2, EXT_TEMP), /* 0x05 PHY1_TEMP */ STAT(P2, EXT_COOLING), /* 0x06 PHY1_COOLING */ STAT(Px, 1V), /* 0x07 IN_1V0 */ STAT(Px, 1_2V), /* 0x08 IN_1V2 */ STAT(Px, 1_8V), /* 0x09 IN_1V8 */ STAT(Px, 2_5V), /* 0x0a IN_2V5 */ STAT(Px, 3_3V), /* 0x0b IN_3V3 */ STAT(Px, 12V), /* 0x0c IN_12V0 */ STAT(Px, 1_2VA), /* 0x0d IN_1V2A */ STAT(Px, VREF), /* 0x0e IN_VREF */ STAT(Px, VAOE), /* 0x0f OUT_VAOE */ STAT(Px, AOE_TEMP), /* 0x10 AOE_TEMP */ STAT(Px, PSU_AOE_TEMP), /* 0x11 PSU_AOE_TEMP */ STAT(Px, PSU_TEMP), /* 0x12 PSU_TEMP */ STAT(Px, FAN0), /* 0x13 FAN_0 */ STAT(Px, FAN1), /* 0x14 FAN_1 */ STAT(Px, FAN2), /* 0x15 FAN_2 */ STAT(Px, FAN3), /* 0x16 FAN_3 */ STAT(Px, FAN4), /* 0x17 FAN_4 */ STAT(Px, VAOE_IN), /* 0x18 IN_VAOE */ STAT(Px, IAOE), /* 0x19 OUT_IAOE */ STAT(Px, IAOE_IN), /* 0x1a IN_IAOE */ STAT(Px, NIC_POWER), /* 0x1b NIC_POWER */ STAT(Px, 0_9V), /* 0x1c IN_0V9 */ STAT(Px, I0_9V), /* 0x1d IN_I0V9 */ STAT(Px, I1_2V), /* 0x1e IN_I1V2 */ STAT_NEXT_PAGE(), /* 0x1f Next page flag (not a sensor) */ /* Sensor page 1 MC_CMD_SENSOR_xxx */ STAT(Px, 0_9V_ADC), /* 0x20 IN_0V9_ADC */ STAT(Px, INT_TEMP2), /* 0x21 CONTROLLER_2_TEMP */ STAT(Px, VREG_TEMP), /* 0x22 VREG_INTERNAL_TEMP */ STAT(Px, VREG_0_9V_TEMP), /* 0x23 VREG_0V9_TEMP */ STAT(Px, VREG_1_2V_TEMP), /* 0x24 VREG_1V2_TEMP */ STAT(Px, INT_VPTAT), /* 0x25 CTRLR. VPTAT */ STAT(Px, INT_ADC_TEMP), /* 0x26 CTRLR. INTERNAL_TEMP */ STAT(Px, EXT_VPTAT), /* 0x27 CTRLR. VPTAT_EXTADC */ STAT(Px, EXT_ADC_TEMP), /* 0x28 CTRLR. 
INTERNAL_TEMP_EXTADC */ STAT(Px, AMBIENT_TEMP), /* 0x29 AMBIENT_TEMP */ STAT(Px, AIRFLOW), /* 0x2a AIRFLOW */ STAT(Px, VDD08D_VSS08D_CSR), /* 0x2b VDD08D_VSS08D_CSR */ STAT(Px, VDD08D_VSS08D_CSR_EXTADC), /* 0x2c VDD08D_VSS08D_CSR_EXTADC */ STAT(Px, HOTPOINT_TEMP), /* 0x2d HOTPOINT_TEMP */ STAT(P1, PHY_POWER_SWITCH_PORT0), /* 0x2e PHY_POWER_SWITCH_PORT0 */ STAT(P2, PHY_POWER_SWITCH_PORT1), /* 0x2f PHY_POWER_SWITCH_PORT1 */ STAT(Px, MUM_VCC), /* 0x30 MUM_VCC */ STAT(Px, 0V9_A), /* 0x31 0V9_A */ STAT(Px, I0V9_A), /* 0x32 I0V9_A */ STAT(Px, 0V9_A_TEMP), /* 0x33 0V9_A_TEMP */ STAT(Px, 0V9_B), /* 0x34 0V9_B */ STAT(Px, I0V9_B), /* 0x35 I0V9_B */ STAT(Px, 0V9_B_TEMP), /* 0x36 0V9_B_TEMP */ STAT(Px, CCOM_AVREG_1V2_SUPPLY), /* 0x37 CCOM_AVREG_1V2_SUPPLY */ STAT(Px, CCOM_AVREG_1V2_SUPPLY_EXT_ADC), /* 0x38 CCOM_AVREG_1V2_SUPPLY_EXT_ADC */ STAT(Px, CCOM_AVREG_1V8_SUPPLY), /* 0x39 CCOM_AVREG_1V8_SUPPLY */ STAT(Px, CCOM_AVREG_1V8_SUPPLY_EXT_ADC), /* 0x3a CCOM_AVREG_1V8_SUPPLY_EXT_ADC */ STAT_NO_SENSOR(), /* 0x3b (no sensor) */ STAT_NO_SENSOR(), /* 0x3c (no sensor) */ STAT_NO_SENSOR(), /* 0x3d (no sensor) */ STAT_NO_SENSOR(), /* 0x3e (no sensor) */ STAT_NEXT_PAGE(), /* 0x3f Next page flag (not a sensor) */ /* Sensor page 2 MC_CMD_SENSOR_xxx */ STAT(Px, CONTROLLER_MASTER_VPTAT), /* 0x40 MASTER_VPTAT */ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP), /* 0x41 MASTER_INT_TEMP */ STAT(Px, CONTROLLER_MASTER_VPTAT_EXT_ADC), /* 0x42 MAST_VPTAT_EXT_ADC */ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC), /* 0x43 MASTER_INTERNAL_TEMP_EXT_ADC */ STAT(Px, CONTROLLER_SLAVE_VPTAT), /* 0x44 SLAVE_VPTAT */ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP), /* 0x45 SLAVE_INTERNAL_TEMP */ STAT(Px, CONTROLLER_SLAVE_VPTAT_EXT_ADC), /* 0x46 SLAVE_VPTAT_EXT_ADC */ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC), /* 0x47 SLAVE_INTERNAL_TEMP_EXT_ADC */ }; #define MCDI_STATIC_SENSOR_ASSERT(_field) \ EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \ == EFX_MON_STAT_STATE_ ## _field) static void mcdi_mon_decode_stats( __in efx_nic_t *enp, __in_ecount(sensor_mask_size) uint32_t *sensor_mask, __in size_t sensor_mask_size, __in_opt efsys_mem_t *esmp, __out_ecount_opt(sensor_mask_size) uint32_t *stat_maskp, __inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); uint16_t port_mask; uint16_t sensor; size_t sensor_max; uint32_t stat_mask[(EFX_ARRAY_SIZE(mcdi_sensor_map) + 31) / 32]; uint32_t idx = 0; uint32_t page = 0; /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */ MCDI_STATIC_SENSOR_ASSERT(OK); MCDI_STATIC_SENSOR_ASSERT(WARNING); MCDI_STATIC_SENSOR_ASSERT(FATAL); MCDI_STATIC_SENSOR_ASSERT(BROKEN); MCDI_STATIC_SENSOR_ASSERT(NO_READING); EFX_STATIC_ASSERT(sizeof (stat_mask[0]) * 8 == EFX_MON_MASK_ELEMENT_SIZE); sensor_max = MIN((8 * sensor_mask_size), EFX_ARRAY_SIZE(mcdi_sensor_map)); port_mask = 1U << emip->emi_port; memset(stat_mask, 0, sizeof (stat_mask)); /* * The MCDI sensor readings in the DMA buffer are a packed array of * MC_CMD_SENSOR_VALUE_ENTRY structures, which only includes entries for * supported sensors (bit set in sensor_mask). The sensor_mask and * sensor readings do not include entries for the per-page NEXT_PAGE * flag. * * sensor_mask may legitimately contain MCDI sensors that the driver * does not understand. 
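A hedged consumer-side sketch of reading one decoded statistic via mcdi_mon_stats_update(); the emsv_value/emsv_state fields and the enc_mon_stat_mask indexing follow the decode code in this file, while example_read_sensor(), the uint16_t result width and the on-stack values[] array are assumptions, not part of the source:

static	__checkReturn	efx_rc_t
example_read_sensor(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__in		efx_mon_stat_t id,
	__out		uint16_t *valuep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mon_stat_value_t values[EFX_MON_NSTATS];
	efx_rc_t rc;

	/* Skip statistics the NIC does not report */
	if ((encp->enc_mon_stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] &
	    (1U << (id % EFX_MON_MASK_ELEMENT_SIZE))) == 0) {
		rc = ENOTSUP;
		goto fail1;
	}

	(void) memset(values, 0, sizeof (values));

	/* DMA the sensor readings and decode them into values[] */
	if ((rc = mcdi_mon_stats_update(enp, esmp, values)) != 0)
		goto fail2;

	*valuep = values[id].emsv_value;	/* emsv_state holds OK/WARNING/... */

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}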
*/ for (sensor = 0; sensor < sensor_max; ++sensor) { efx_mon_stat_t id = mcdi_sensor_map[sensor].msm_stat; if ((sensor % MCDI_MON_PAGE_SIZE) == MC_CMD_SENSOR_PAGE0_NEXT) { EFSYS_ASSERT3U(id, ==, MCDI_MON_NEXT_PAGE); page++; continue; } if (~(sensor_mask[page]) & (1U << sensor)) continue; idx++; if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0) continue; EFSYS_ASSERT(id < EFX_MON_NSTATS); /* * stat_mask is a bitmask indexed by EFX_MON_* monitor statistic * identifiers from efx_mon_stat_t (without NEXT_PAGE bits). * * If there is an entry in the MCDI sensor to monitor statistic * map then the sensor reading is used for the value of the * monitor statistic. */ stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] |= (1U << (id % EFX_MON_MASK_ELEMENT_SIZE)); if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) { efx_dword_t dword; /* Get MCDI sensor reading from DMA buffer */ EFSYS_MEM_READD(esmp, 4 * (idx - 1), &dword); /* Update EFX monitor stat from MCDI sensor reading */ stat[id].emsv_value = (uint16_t)EFX_DWORD_FIELD(dword, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); } } if (stat_maskp != NULL) { memcpy(stat_maskp, stat_mask, sizeof (stat_mask)); } } - __checkReturn int + __checkReturn efx_rc_t mcdi_mon_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_mon_stat_t *idp, __out efx_mon_stat_value_t *valuep) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint16_t port_mask; uint16_t sensor; uint16_t state; uint16_t value; efx_mon_stat_t id; - int rc; + efx_rc_t rc; port_mask = (emip->emi_port == 1) ? MCDI_MON_PORT_P1 : MCDI_MON_PORT_P2; sensor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR); state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE); value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE); /* Hardware must support this MCDI sensor */ EFSYS_ASSERT3U(sensor, <, (8 * encp->enc_mcdi_sensor_mask_size)); EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT); EFSYS_ASSERT(encp->enc_mcdi_sensor_maskp != NULL); EFSYS_ASSERT((encp->enc_mcdi_sensor_maskp[sensor / MCDI_MON_PAGE_SIZE] & (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0); /* But we don't have to understand it */ if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) { rc = ENOTSUP; goto fail1; } id = mcdi_sensor_map[sensor].msm_stat; if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0) return (ENODEV); EFSYS_ASSERT(id < EFX_MON_NSTATS); *idp = id; valuep->emsv_value = value; valuep->emsv_state = state; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_read_sensors( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint32_t size) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN, MC_CMD_READ_SENSORS_EXT_OUT_LEN)]; uint32_t addr_lo, addr_hi; req.emr_cmd = MC_CMD_READ_SENSORS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_READ_SENSORS_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_READ_SENSORS_EXT_OUT_LEN; addr_lo = (uint32_t)(EFSYS_MEM_ADDR(esmp) & 0xffffffff); addr_hi = (uint32_t)(EFSYS_MEM_ADDR(esmp) >> 32); MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_LO, addr_lo); MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_HI, addr_hi); MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_LENGTH, size); efx_mcdi_execute(enp, &req); return (req.emr_rc); } -static __checkReturn int +static __checkReturn 
efx_rc_t efx_mcdi_sensor_info_npages( __in efx_nic_t *enp, __out uint32_t *npagesp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN, MC_CMD_SENSOR_INFO_OUT_LENMAX)]; int page; - int rc; + efx_rc_t rc; EFSYS_ASSERT(npagesp != NULL); page = 0; do { (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SENSOR_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page++); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } } while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) & (1 << MC_CMD_SENSOR_PAGE0_NEXT)); *npagesp = page; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t efx_mcdi_sensor_info( __in efx_nic_t *enp, __out_ecount(npages) uint32_t *sensor_maskp, __in size_t npages) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN, MC_CMD_SENSOR_INFO_OUT_LENMAX)]; uint32_t page; - int rc; + efx_rc_t rc; EFSYS_ASSERT(sensor_maskp != NULL); for (page = 0; page < npages; page++) { uint32_t mask; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SENSOR_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK); if ((page != (npages - 1)) && ((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) { rc = EINVAL; goto fail2; } sensor_maskp[page] = mask; } if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) { rc = EINVAL; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t mcdi_mon_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t size = encp->enc_mon_stat_dma_buf_size; - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_read_sensors(enp, esmp, size)) != 0) goto fail1; EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, size); mcdi_mon_decode_stats(enp, encp->enc_mcdi_sensor_maskp, encp->enc_mcdi_sensor_mask_size, esmp, NULL, values); return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t mcdi_mon_cfg_build( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t npages; - int rc; + efx_rc_t rc; switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: encp->enc_mon_type = EFX_MON_SFC90X0; break; #endif #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: encp->enc_mon_type = EFX_MON_SFC91X0; break; #endif default: rc = EINVAL; goto fail1; } /* Get mc sensor mask size */ npages = 0; if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0) goto fail2; encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE; encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t); /* Allocate mc sensor mask */ EFSYS_KMEM_ALLOC(enp->en_esip, encp->enc_mcdi_sensor_mask_size, encp->enc_mcdi_sensor_maskp); if (encp->enc_mcdi_sensor_maskp == NULL) { rc = ENOMEM; goto 
fail3; } /* Read mc sensor mask */ if ((rc = efx_mcdi_sensor_info(enp, encp->enc_mcdi_sensor_maskp, npages)) != 0) goto fail4; /* Build monitor statistics mask */ mcdi_mon_decode_stats(enp, encp->enc_mcdi_sensor_maskp, encp->enc_mcdi_sensor_mask_size, NULL, encp->enc_mon_stat_mask, NULL); return (0); fail4: EFSYS_PROBE(fail4); EFSYS_KMEM_FREE(enp->en_esip, encp->enc_mcdi_sensor_mask_size, encp->enc_mcdi_sensor_maskp); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void mcdi_mon_cfg_free( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); if (encp->enc_mcdi_sensor_maskp != NULL) { EFSYS_KMEM_FREE(enp->en_esip, encp->enc_mcdi_sensor_mask_size, encp->enc_mcdi_sensor_maskp); } } #endif /* EFSYS_OPT_MON_STATS */ #endif /* EFSYS_OPT_MON_MCDI */ Index: stable/10/sys/dev/sfxge/common/mcdi_mon.h =================================================================== --- stable/10/sys/dev/sfxge/common/mcdi_mon.h (revision 293926) +++ stable/10/sys/dev/sfxge/common/mcdi_mon.h (revision 293927) @@ -1,76 +1,76 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
* * $FreeBSD$ */ #ifndef _SYS_MCDI_MON_H #define _SYS_MCDI_MON_H #include "efx.h" #ifdef __cplusplus extern "C" { #endif #if EFSYS_OPT_MON_MCDI #if EFSYS_OPT_MON_STATS - __checkReturn int + __checkReturn efx_rc_t mcdi_mon_cfg_build( __in efx_nic_t *enp); void mcdi_mon_cfg_free( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t mcdi_mon_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_mon_stat_t *idp, __out efx_mon_stat_value_t *valuep); -extern __checkReturn int +extern __checkReturn efx_rc_t mcdi_mon_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values); #endif /* EFSYS_OPT_MON_STATS */ #endif /* EFSYS_OPT_MON_MCDI */ #ifdef __cplusplus } #endif #endif /* _SYS_MCDI_MON_H */ Index: stable/10/sys/dev/sfxge/common/siena_impl.h =================================================================== --- stable/10/sys/dev/sfxge/common/siena_impl.h (revision 293926) +++ stable/10/sys/dev/sfxge/common/siena_impl.h (revision 293927) @@ -1,484 +1,484 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
* * $FreeBSD$ */ #ifndef _SYS_SIENA_IMPL_H #define _SYS_SIENA_IMPL_H #include "efx.h" #include "efx_regs.h" #include "efx_mcdi.h" #include "siena_flash.h" #ifdef __cplusplus extern "C" { #endif #if EFSYS_OPT_PHY_PROPS /* START MKCONFIG GENERATED SienaPhyHeaderPropsBlock a8db1f8eb5106efd */ typedef enum siena_phy_prop_e { SIENA_PHY_NPROPS } siena_phy_prop_t; /* END MKCONFIG GENERATED SienaPhyHeaderPropsBlock */ #endif /* EFSYS_OPT_PHY_PROPS */ #define SIENA_NVRAM_CHUNK 0x80 -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nic_probe( __in efx_nic_t *enp); #if EFSYS_OPT_PCIE_TUNE -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nic_pcie_extended_sync( __in efx_nic_t *enp); #endif -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nic_reset( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nic_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void siena_nic_fini( __in efx_nic_t *enp); extern void siena_nic_unprobe( __in efx_nic_t *enp); #define SIENA_SRAM_ROWS 0x12000 extern void siena_sram_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG -extern __checkReturn int +extern __checkReturn efx_rc_t siena_sram_test( __in efx_nic_t *enp, __in efx_sram_pattern_fn_t func); #endif /* EFSYS_OPT_DIAG */ #if EFSYS_OPT_MCDI -extern __checkReturn int +extern __checkReturn efx_rc_t siena_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); extern void siena_mcdi_request_copyin( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in unsigned int seq, __in boolean_t ev_cpl, __in boolean_t new_epoch); extern __checkReturn boolean_t siena_mcdi_request_poll( __in efx_nic_t *enp); extern void siena_mcdi_request_copyout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp); -extern int +extern efx_rc_t siena_mcdi_poll_reboot( __in efx_nic_t *enp); extern void siena_mcdi_fini( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_mcdi_fw_update_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_mcdi_macaddr_change_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_partn_size( __in efx_nic_t *enp, __in unsigned int partn, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_partn_lock( __in efx_nic_t *enp, __in unsigned int partn); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_partn_read( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_partn_erase( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_partn_write( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern void siena_nvram_partn_unlock( __in efx_nic_t *enp, __in unsigned int partn); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_get_dynamic_cfg( __in efx_nic_t *enp, __in unsigned int index, __in boolean_t vpd, __out siena_mc_dynamic_config_hdr_t **dcfgp, __out size_t *sizep); #endif /* 
EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM #if EFSYS_OPT_DIAG -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_size( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_get_subtype( __in efx_nic_t *enp, __in unsigned int partn, __out uint32_t *subtypep); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_get_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_rw_start( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *pref_chunkp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_read_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_erase( __in efx_nic_t *enp, __in efx_nvram_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_write_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __in_bcount(size) caddr_t data, __in size_t size); extern void siena_nvram_rw_finish( __in efx_nic_t *enp, __in efx_nvram_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_nvram_set_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_ecount(4) uint16_t version[4]); #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_VPD -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_init( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_set( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_next( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void siena_vpd_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_VPD */ typedef struct siena_link_state_s { uint32_t sls_adv_cap_mask; uint32_t sls_lp_cap_mask; unsigned int sls_fcntl; efx_link_mode_t sls_link_mode; #if EFSYS_OPT_LOOPBACK efx_loopback_type_t sls_loopback; #endif boolean_t sls_mac_up; } siena_link_state_t; extern void siena_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep); -extern __checkReturn int +extern __checkReturn efx_rc_t 
siena_phy_get_link( __in efx_nic_t *enp, __out siena_link_state_t *slsp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_power( __in efx_nic_t *enp, __in boolean_t on); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_reconfigure( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_verify( __in efx_nic_t *enp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); #if EFSYS_OPT_PHY_STATS extern void siena_phy_decode_stats( __in efx_nic_t *enp, __in uint32_t vmask, __in_opt efsys_mem_t *esmp, __out_opt uint64_t *smaskp, __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES extern const char * siena_phy_prop_name( __in efx_nic_t *enp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_prop_get( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t flags, __out uint32_t *valp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_prop_set( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t val); #endif /* EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count); extern void siena_phy_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ -extern __checkReturn int +extern __checkReturn efx_rc_t siena_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); -extern __checkReturn int +extern __checkReturn efx_rc_t siena_mac_reconfigure( __in efx_nic_t *enp); #if EFSYS_OPT_LOOPBACK -extern __checkReturn int +extern __checkReturn efx_rc_t siena_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type); #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS -extern __checkReturn int +extern __checkReturn efx_rc_t siena_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ #ifdef __cplusplus } #endif #endif /* _SYS_SIENA_IMPL_H */ Index: stable/10/sys/dev/sfxge/common/siena_mac.c =================================================================== --- stable/10/sys/dev/sfxge/common/siena_mac.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/siena_mac.c (revision 293927) @@ -1,436 +1,436 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA - __checkReturn int + __checkReturn efx_rc_t siena_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep) { efx_port_t *epp = &(enp->en_port); siena_link_state_t sls; - int rc; + efx_rc_t rc; if ((rc = siena_phy_get_link(enp, &sls)) != 0) goto fail1; epp->ep_adv_cap_mask = sls.sls_adv_cap_mask; epp->ep_fcntl = sls.sls_fcntl; *link_modep = sls.sls_link_mode; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); *link_modep = EFX_LINK_UNKNOWN; return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp) { siena_link_state_t sls; - int rc; + efx_rc_t rc; /* * Because Siena doesn't *require* polling, we can't rely on * siena_mac_poll() being executed to populate epp->ep_mac_up. */ if ((rc = siena_phy_get_link(enp, &sls)) != 0) goto fail1; *mac_upp = sls.sls_mac_up; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_mac_reconfigure( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_oword_t multicast_hash[2]; efx_mcdi_req_t req; uint8_t payload[MAX(MAX(MC_CMD_SET_MAC_IN_LEN, MC_CMD_SET_MAC_OUT_LEN), MAX(MC_CMD_SET_MCAST_HASH_IN_LEN, MC_CMD_SET_MCAST_HASH_OUT_LEN))]; unsigned int fcntl; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_MAC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_MAC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN; MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu); MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0); EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR), epp->ep_mac_addr); MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT, SET_MAC_IN_REJECT_UNCST, !epp->ep_all_unicst, SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst); if (epp->ep_fcntl_autoneg) /* efx_fcntl_set() has already set the phy capabilities */ fcntl = MC_CMD_FCNTL_AUTO; else if (epp->ep_fcntl & EFX_FCNTL_RESPOND) fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE) ? 
MC_CMD_FCNTL_BIDIR : MC_CMD_FCNTL_RESPOND; else fcntl = MC_CMD_FCNTL_OFF; MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } /* Push multicast hash */ if (epp->ep_all_mulcst) { /* A hash matching all multicast is all 1s */ EFX_SET_OWORD(multicast_hash[0]); EFX_SET_OWORD(multicast_hash[1]); } else if (epp->ep_mulcst) { /* Use the hash set by the multicast list */ multicast_hash[0] = epp->ep_multicst_hash[0]; multicast_hash[1] = epp->ep_multicst_hash[1]; } else { /* A hash matching no traffic is simply 0 */ EFX_ZERO_OWORD(multicast_hash[0]); EFX_ZERO_OWORD(multicast_hash[1]); } /* * Broadcast packets go through the multicast hash filter. * The IEEE 802.3 CRC32 of the broadcast address is 0xbe2612ff * so we always add bit 0xff to the mask (bit 0x7f in the * second octword). */ if (epp->ep_brdcst) EFX_SET_OWORD_BIT(multicast_hash[1], 0x7f); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_MCAST_HASH; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_MCAST_HASH_OUT_LEN; memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0), multicast_hash, sizeof (multicast_hash)); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_LOOPBACK - __checkReturn int + __checkReturn efx_rc_t siena_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type) { efx_port_t *epp = &(enp->en_port); efx_phy_ops_t *epop = epp->ep_epop; efx_loopback_type_t old_loopback_type; efx_link_mode_t old_loopback_link_mode; - int rc; + efx_rc_t rc; /* The PHY object handles this on Siena */ old_loopback_type = epp->ep_loopback_type; old_loopback_link_mode = epp->ep_loopback_link_mode; epp->ep_loopback_type = loopback_type; epp->ep_loopback_link_mode = link_mode; if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE(fail2); epp->ep_loopback_type = old_loopback_type; epp->ep_loopback_link_mode = old_loopback_link_mode; return (rc); } #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS #define SIENA_MAC_STAT_READ(_esmp, _field, _eqp) \ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp) - __checkReturn int + __checkReturn efx_rc_t siena_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp) { efx_qword_t value; efx_qword_t generation_start; efx_qword_t generation_end; _NOTE(ARGUNUSED(enp)) /* Read END first so we don't race with the MC */ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END, &generation_end); EFSYS_MEM_READ_BARRIER(); /* TX */ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value); EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), 
&value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value); /* RX */ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value); 
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value); EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]), &(value.eq_dword[1])); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]), &(value.eq_dword[1])); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]), &(value.eq_dword[1])); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]), &(value.eq_dword[0])); EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]), &(value.eq_dword[1])); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value); EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); EFSYS_MEM_READ_BARRIER(); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, &generation_start); /* Check that we didn't read the stats in the middle of a DMA */ /* Not a good enough check ? 
*/ if (memcmp(&generation_start, &generation_end, sizeof (generation_start))) return (EAGAIN); if (generationp) *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); return (0); } #endif /* EFSYS_OPT_MAC_STATS */ #endif /* EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/siena_mcdi.c =================================================================== --- stable/10/sys/dev/sfxge/common/siena_mcdi.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/siena_mcdi.c (revision 293927) @@ -1,354 +1,354 @@ /*- * Copyright (c) 2012-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA && EFSYS_OPT_MCDI #define SIENA_MCDI_PDU(_emip) \ (((emip)->emi_port == 1) \ ? MC_SMEM_P0_PDU_OFST >> 2 \ : MC_SMEM_P1_PDU_OFST >> 2) #define SIENA_MCDI_DOORBELL(_emip) \ (((emip)->emi_port == 1) \ ? MC_SMEM_P0_DOORBELL_OFST >> 2 \ : MC_SMEM_P1_DOORBELL_OFST >> 2) #define SIENA_MCDI_STATUS(_emip) \ (((emip)->emi_port == 1) \ ? 
MC_SMEM_P0_STATUS_OFST >> 2 \ : MC_SMEM_P1_STATUS_OFST >> 2) void siena_mcdi_request_copyin( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in unsigned int seq, __in boolean_t ev_cpl, __in boolean_t new_epoch) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_dword_t dword; unsigned int xflags; unsigned int pdur; unsigned int dbr; unsigned int pos; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); _NOTE(ARGUNUSED(new_epoch)) EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); pdur = SIENA_MCDI_PDU(emip); dbr = SIENA_MCDI_DOORBELL(emip); xflags = 0; if (ev_cpl) xflags |= MCDI_HEADER_XFLAGS_EVREQ; /* Construct the header in shared memory */ EFX_POPULATE_DWORD_6(dword, MCDI_HEADER_CODE, emrp->emr_cmd, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_DATALEN, emrp->emr_in_length, MCDI_HEADER_SEQ, seq, MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_XFLAGS, xflags); EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE); for (pos = 0; pos < emrp->emr_in_length; pos += sizeof (efx_dword_t)) { memcpy(&dword, MCDI_IN(*emrp, efx_dword_t, pos), MIN(sizeof (dword), emrp->emr_in_length - pos)); EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur + 1 + (pos >> 2), &dword, B_FALSE); } /* Ring the doorbell */ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11); EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE); } void siena_mcdi_request_copyout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); unsigned int pos; unsigned int pdur; efx_dword_t data; pdur = SIENA_MCDI_PDU(emip); /* Copy payload out if caller supplied buffer */ if (emrp->emr_out_buf != NULL) { size_t bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length); for (pos = 0; pos < bytes; pos += sizeof (efx_dword_t)) { EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur + 1 + (pos >> 2), &data, B_FALSE); memcpy(MCDI_OUT(*emrp, efx_dword_t, pos), &data, MIN(sizeof (data), bytes - pos)); } } } - int + efx_rc_t siena_mcdi_poll_reboot( __in efx_nic_t *enp) { #ifndef EFX_GRACEFUL_MC_REBOOT /* * This function is not being used properly. * Until its callers are fixed, it should always return 0. 
*/ _NOTE(ARGUNUSED(enp)) return (0); #else efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); unsigned int rebootr; efx_dword_t dword; uint32_t value; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); rebootr = SIENA_MCDI_STATUS(emip); EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE); value = EFX_DWORD_FIELD(dword, EFX_DWORD_0); if (value == 0) return (0); EFX_ZERO_DWORD(dword); EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE); if (value == MC_STATUS_DWORD_ASSERT) return (EINTR); else return (EIO); #endif } __checkReturn boolean_t siena_mcdi_request_poll( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_mcdi_req_t *emrp; efx_dword_t dword; unsigned int pdur; unsigned int seq; unsigned int length; int state; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); /* Serialise against post-watchdog efx_mcdi_ev* */ EFSYS_LOCK(enp->en_eslp, state); EFSYS_ASSERT(emip->emi_pending_req != NULL); EFSYS_ASSERT(!emip->emi_ev_cpl); emrp = emip->emi_pending_req; /* Check for reboot atomically w.r.t efx_mcdi_request_start */ if (emip->emi_poll_cnt++ == 0) { if ((rc = siena_mcdi_poll_reboot(enp)) != 0) { emip->emi_pending_req = NULL; EFSYS_UNLOCK(enp->en_eslp, state); goto fail1; } } EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); pdur = SIENA_MCDI_PDU(emip); /* Read the command header */ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_FALSE); if (EFX_DWORD_FIELD(dword, MCDI_HEADER_RESPONSE) == 0) { EFSYS_UNLOCK(enp->en_eslp, state); return (B_FALSE); } /* Request complete */ emip->emi_pending_req = NULL; seq = (emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ); /* Check for synchronous reboot */ if (EFX_DWORD_FIELD(dword, MCDI_HEADER_ERROR) != 0 && EFX_DWORD_FIELD(dword, MCDI_HEADER_DATALEN) == 0) { /* Consume status word */ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); siena_mcdi_poll_reboot(enp); EFSYS_UNLOCK(enp->en_eslp, state); rc = EIO; goto fail2; } EFSYS_UNLOCK(enp->en_eslp, state); /* Check that the returned data is consistent */ if (EFX_DWORD_FIELD(dword, MCDI_HEADER_CODE) != emrp->emr_cmd || EFX_DWORD_FIELD(dword, MCDI_HEADER_SEQ) != seq) { /* Response is for a different request */ rc = EIO; goto fail3; } length = EFX_DWORD_FIELD(dword, MCDI_HEADER_DATALEN); if (EFX_DWORD_FIELD(dword, MCDI_HEADER_ERROR)) { efx_dword_t errdword; int errcode; EFSYS_ASSERT3U(length, ==, 4); EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur + 1 + (MC_CMD_ERR_CODE_OFST >> 2), &errdword, B_FALSE); errcode = EFX_DWORD_FIELD(errdword, EFX_DWORD_0); rc = efx_mcdi_request_errcode(errcode); if (!emrp->emr_quiet) { EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, int, errcode); } goto fail4; } else { emrp->emr_out_length_used = length; emrp->emr_rc = 0; siena_mcdi_request_copyout(enp, emrp); } goto out; fail4: if (!emrp->emr_quiet) EFSYS_PROBE(fail4); fail3: if (!emrp->emr_quiet) EFSYS_PROBE(fail3); fail2: if (!emrp->emr_quiet) EFSYS_PROBE(fail2); fail1: if (!emrp->emr_quiet) - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Fill out error state */ emrp->emr_rc = rc; emrp->emr_out_length_used = 0; /* Reboot/Assertion */ if (rc == EIO || rc == EINTR) efx_mcdi_raise_exception(enp, emrp, rc); out: return (B_TRUE); } - __checkReturn int + __checkReturn efx_rc_t siena_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_oword_t oword; unsigned int portnum; - int rc; + efx_rc_t rc; 
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); /* Determine the port number to use for MCDI */ EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword); portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM); if (portnum == 0) { /* Presumably booted from ROM; only MCDI port 1 will work */ emip->emi_port = 1; } else if (portnum <= 2) { emip->emi_port = portnum; } else { rc = EINVAL; goto fail1; } /* * Wipe the atomic reboot status so subsequent MCDI requests succeed. * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the * assertion handler. */ (void) siena_mcdi_poll_reboot(enp); return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_mcdi_fini( __in efx_nic_t *enp) { } - __checkReturn int + __checkReturn efx_rc_t siena_mcdi_fw_update_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); *supportedp = B_TRUE; return (0); } - __checkReturn int + __checkReturn efx_rc_t siena_mcdi_macaddr_change_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); *supportedp = B_TRUE; return (0); } #endif /* EFSYS_OPT_SIENA && EFSYS_OPT_MCDI */ Index: stable/10/sys/dev/sfxge/common/siena_nic.c =================================================================== --- stable/10/sys/dev/sfxge/common/siena_nic.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/siena_nic.c (revision 293927) @@ -1,581 +1,581 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #include "mcdi_mon.h" #if EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t siena_nic_get_partn_mask( __in efx_nic_t *enp, __out unsigned int *maskp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN, MC_CMD_NVRAM_TYPES_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_TYPES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_TYPES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_PCIE_TUNE - __checkReturn int + __checkReturn efx_rc_t siena_nic_pcie_extended_sync( __in efx_nic_t *enp) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG17230, B_TRUE, NULL) != 0)) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_PCIE_TUNE */ -static __checkReturn int +static __checkReturn efx_rc_t siena_board_cfg( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint8_t mac_addr[6]; efx_dword_t capabilities; uint32_t board_type; uint32_t nevq, nrxq, ntxq; - int rc; + efx_rc_t rc; /* External port identifier using one-based port numbering */ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port; /* Board configuration */ if ((rc = efx_mcdi_get_board_cfg(enp, &board_type, &capabilities, mac_addr)) != 0) goto fail1; EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); encp->enc_board_type = board_type; /* Additional capabilities */ encp->enc_clk_mult = 1; if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO)) { enp->en_features |= EFX_FEATURE_TURBO; if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO_ACTIVE)) { encp->enc_clk_mult = 2; } } encp->enc_evq_timer_quantum_ns = EFX_EVQ_SIENA_TIMER_QUANTUM_NS / encp->enc_clk_mult; encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; /* When hash header insertion is enabled, Siena inserts 16 bytes */ encp->enc_rx_prefix_size = 16; /* Alignment for receive packet DMA buffers */ encp->enc_rx_buf_align_start = 1; encp->enc_rx_buf_align_end = 1; /* Alignment for WPTR updates */ encp->enc_rx_push_align = 1; /* Resource limits */ rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq); if (rc != 0) { if (rc != ENOTSUP) goto fail2; nevq = 1024; nrxq = EFX_RXQ_LIMIT_TARGET; ntxq = EFX_TXQ_LIMIT_TARGET; } encp->enc_evq_limit = nevq; encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET, nrxq); encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET, ntxq); encp->enc_buftbl_limit = SIENA_SRAM_ROWS - (encp->enc_txq_limit * EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE)) - (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE)); encp->enc_hw_tx_insert_vlan_enabled = B_FALSE; encp->enc_fw_assisted_tso_enabled = B_FALSE; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -static __checkReturn int +static __checkReturn efx_rc_t siena_phy_cfg( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - int rc; + efx_rc_t rc; /* 
Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) goto fail1; #if EFSYS_OPT_PHY_STATS /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */ siena_phy_decode_stats(enp, encp->enc_mcdi_phy_stat_mask, NULL, &encp->enc_phy_stat_mask, NULL); #endif /* EFSYS_OPT_PHY_STATS */ return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nic_probe( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); siena_link_state_t sls; unsigned int mask; efx_oword_t oword; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); /* Test BIU */ if ((rc = efx_nic_biu_test(enp)) != 0) goto fail1; /* Clear the region register */ EFX_POPULATE_OWORD_4(oword, FRF_AZ_ADR_REGION0, 0, FRF_AZ_ADR_REGION1, (1 << 16), FRF_AZ_ADR_REGION2, (2 << 16), FRF_AZ_ADR_REGION3, (3 << 16)); EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword); /* Read clear any assertion state */ if ((rc = efx_mcdi_read_assertion(enp)) != 0) goto fail2; /* Exit the assertion handler */ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) goto fail3; /* Wrestle control from the BMC */ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) goto fail4; if ((rc = siena_board_cfg(enp)) != 0) goto fail5; if ((rc = siena_phy_cfg(enp)) != 0) goto fail6; /* Obtain the default PHY advertised capabilities */ if ((rc = siena_nic_reset(enp)) != 0) goto fail7; if ((rc = siena_phy_get_link(enp, &sls)) != 0) goto fail8; epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask; epp->ep_adv_cap_mask = sls.sls_adv_cap_mask; #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0) goto fail9; enp->en_u.siena.enu_partn_mask = mask; #endif #if EFSYS_OPT_MAC_STATS /* Wipe the MAC statistics */ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0) goto fail10; #endif #if EFSYS_OPT_LOOPBACK if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0) goto fail11; #endif #if EFSYS_OPT_MON_STATS if ((rc = mcdi_mon_cfg_build(enp)) != 0) goto fail12; #endif encp->enc_features = enp->en_features; return (0); #if EFSYS_OPT_MON_STATS fail12: EFSYS_PROBE(fail12); #endif #if EFSYS_OPT_LOOPBACK fail11: EFSYS_PROBE(fail11); #endif #if EFSYS_OPT_MAC_STATS fail10: EFSYS_PROBE(fail10); #endif #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM fail9: EFSYS_PROBE(fail9); #endif fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nic_reset( __in efx_nic_t *enp) { efx_mcdi_req_t req; - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); /* siena_nic_reset() is called to recover from BADASSERT failures. */ if ((rc = efx_mcdi_read_assertion(enp)) != 0) goto fail1; if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) goto fail2; /* * Bug24908: ENTITY_RESET_IN_LEN is non zero but zero may be supplied * for backwards compatibility with PORT_RESET_IN_LEN. 
*/ EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0); req.emr_cmd = MC_CMD_ENTITY_RESET; req.emr_in_buf = NULL; req.emr_in_length = 0; req.emr_out_buf = NULL; req.emr_out_length = 0; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } static void siena_nic_rx_cfg( __in efx_nic_t *enp) { efx_oword_t oword; /* * RX_INGR_EN is always enabled on Siena, because we rely on * the RX parser to be resilient to missing SOP/EOP. */ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); /* Disable parsing of additional 802.1Q in Q packets */ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0); EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); } static void siena_nic_usrev_dis( __in efx_nic_t *enp) { efx_oword_t oword; EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1); EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword); } - __checkReturn int + __checkReturn efx_rc_t siena_nic_init( __in efx_nic_t *enp) { - int rc; + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); /* Enable reporting of some events (e.g. link change) */ if ((rc = efx_mcdi_log_ctrl(enp)) != 0) goto fail1; siena_sram_init(enp); /* Configure Siena's RX block */ siena_nic_rx_cfg(enp); /* Disable USR_EVents for now */ siena_nic_usrev_dis(enp); /* bug17057: Ensure set_link is called */ if ((rc = siena_phy_reconfigure(enp)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_nic_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } void siena_nic_unprobe( __in efx_nic_t *enp) { #if EFSYS_OPT_MON_STATS mcdi_mon_cfg_free(enp); #endif /* EFSYS_OPT_MON_STATS */ (void) efx_mcdi_drv_attach(enp, B_FALSE); } #if EFSYS_OPT_DIAG static efx_register_set_t __siena_registers[] = { { FR_AZ_ADR_REGION_REG_OFST, 0, 1 }, { FR_CZ_USR_EV_CFG_OFST, 0, 1 }, { FR_AZ_RX_CFG_REG_OFST, 0, 1 }, { FR_AZ_TX_CFG_REG_OFST, 0, 1 }, { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 }, { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 }, { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 }, { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 }, { FR_AZ_DP_CTRL_REG_OFST, 0, 1 }, { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1}, { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1}, { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1}, { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1} }; static const uint32_t __siena_register_masks[] = { 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x000103FF, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000, 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF, 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF, 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x000003FF, 0x00000000, 0x00000000, 0x00000000, 0x00000FFF, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000 }; static efx_register_set_t __siena_tables[] = { { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP, FR_AZ_RX_FILTER_TBL0_ROWS }, { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP, FR_CZ_RX_MAC_FILTER_TBL0_ROWS }, { FR_AZ_RX_DESC_PTR_TBL_OFST, FR_AZ_RX_DESC_PTR_TBL_STEP,
FR_CZ_RX_DESC_PTR_TBL_ROWS }, { FR_AZ_TX_DESC_PTR_TBL_OFST, FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS }, { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS }, { FR_CZ_TX_FILTER_TBL0_OFST, FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS }, { FR_CZ_TX_MAC_FILTER_TBL0_OFST, FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS } }; static const uint32_t __siena_table_masks[] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF, 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000, 0xFFFFFFFE, 0x0FFFFFFF, 0x01800000, 0x00000000, 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000, 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF, 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000, }; - __checkReturn int + __checkReturn efx_rc_t siena_nic_register_test( __in efx_nic_t *enp) { efx_register_set_t *rsp; const uint32_t *dwordp; unsigned int nitems; unsigned int count; - int rc; + efx_rc_t rc; /* Fill out the register mask entries */ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks) == EFX_ARRAY_SIZE(__siena_registers) * 4); nitems = EFX_ARRAY_SIZE(__siena_registers); dwordp = __siena_register_masks; for (count = 0; count < nitems; ++count) { rsp = __siena_registers + count; rsp->mask.eo_u32[0] = *dwordp++; rsp->mask.eo_u32[1] = *dwordp++; rsp->mask.eo_u32[2] = *dwordp++; rsp->mask.eo_u32[3] = *dwordp++; } /* Fill out the register table entries */ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks) == EFX_ARRAY_SIZE(__siena_tables) * 4); nitems = EFX_ARRAY_SIZE(__siena_tables); dwordp = __siena_table_masks; for (count = 0; count < nitems; ++count) { rsp = __siena_tables + count; rsp->mask.eo_u32[0] = *dwordp++; rsp->mask.eo_u32[1] = *dwordp++; rsp->mask.eo_u32[2] = *dwordp++; rsp->mask.eo_u32[3] = *dwordp++; } if ((rc = efx_nic_test_registers(enp, __siena_registers, EFX_ARRAY_SIZE(__siena_registers))) != 0) goto fail1; if ((rc = efx_nic_test_tables(enp, __siena_tables, EFX_PATTERN_BYTE_ALTERNATE, EFX_ARRAY_SIZE(__siena_tables))) != 0) goto fail2; if ((rc = efx_nic_test_tables(enp, __siena_tables, EFX_PATTERN_BYTE_CHANGING, EFX_ARRAY_SIZE(__siena_tables))) != 0) goto fail3; if ((rc = efx_nic_test_tables(enp, __siena_tables, EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0) goto fail4; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/siena_nvram.c =================================================================== --- stable/10/sys/dev/sfxge/common/siena_nvram.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/siena_nvram.c (revision 293927) @@ -1,871 +1,871 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM - __checkReturn int + __checkReturn efx_rc_t siena_nvram_partn_size( __in efx_nic_t *enp, __in unsigned int partn, __out size_t *sizep) { - int rc; + efx_rc_t rc; if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) { rc = ENOTSUP; goto fail1; } if ((rc = efx_mcdi_nvram_info(enp, partn, sizep, NULL, NULL)) != 0) { goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_partn_lock( __in efx_nic_t *enp, __in unsigned int partn) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) { goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_partn_read( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { size_t chunk; - int rc; + efx_rc_t rc; while (size > 0) { chunk = MIN(size, SIENA_NVRAM_CHUNK); if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk)) != 0) { goto fail1; } size -= chunk; data += chunk; offset += chunk; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_partn_erase( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __in size_t size) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) { goto fail1; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_partn_write( __in efx_nic_t *enp, __in unsigned int partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { size_t chunk; - int rc; + efx_rc_t rc; while (size > 0) { chunk = MIN(size, SIENA_NVRAM_CHUNK); if ((rc = efx_mcdi_nvram_write(enp, partn, offset, data, chunk)) != 0) { goto fail1; } size -= chunk; data += chunk; offset += chunk; } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_nvram_partn_unlock( __in efx_nic_t *enp, __in unsigned int partn) { boolean_t reboot; - int rc; + efx_rc_t rc; /* * Reboot into the new image 
only for PHYs. The driver has to * explicitly cope with an MC reboot after a firmware update. */ reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 || partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 || partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO); if ((rc = efx_mcdi_nvram_update_finish(enp, partn, reboot)) != 0) { goto fail1; } return; fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); } #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM typedef struct siena_parttbl_entry_s { unsigned int partn; unsigned int port; efx_nvram_type_t nvtype; } siena_parttbl_entry_t; static siena_parttbl_entry_t siena_parttbl[] = { {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 1, EFX_NVRAM_NULLPHY}, {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 2, EFX_NVRAM_NULLPHY}, {MC_CMD_NVRAM_TYPE_MC_FW, 1, EFX_NVRAM_MC_FIRMWARE}, {MC_CMD_NVRAM_TYPE_MC_FW, 2, EFX_NVRAM_MC_FIRMWARE}, {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, {MC_CMD_NVRAM_TYPE_EXP_ROM, 1, EFX_NVRAM_BOOTROM}, {MC_CMD_NVRAM_TYPE_EXP_ROM, 2, EFX_NVRAM_BOOTROM}, {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG}, {MC_CMD_NVRAM_TYPE_PHY_PORT0, 1, EFX_NVRAM_PHY}, {MC_CMD_NVRAM_TYPE_PHY_PORT1, 2, EFX_NVRAM_PHY}, {MC_CMD_NVRAM_TYPE_FPGA, 1, EFX_NVRAM_FPGA}, {MC_CMD_NVRAM_TYPE_FPGA, 2, EFX_NVRAM_FPGA}, {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP}, {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP}, {MC_CMD_NVRAM_TYPE_FC_FW, 1, EFX_NVRAM_FCFW}, {MC_CMD_NVRAM_TYPE_FC_FW, 2, EFX_NVRAM_FCFW}, {MC_CMD_NVRAM_TYPE_CPLD, 1, EFX_NVRAM_CPLD}, {MC_CMD_NVRAM_TYPE_CPLD, 2, EFX_NVRAM_CPLD}, }; static __checkReturn siena_parttbl_entry_t * siena_parttbl_entry( __in efx_nic_t *enp, __in efx_nvram_type_t type) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); siena_parttbl_entry_t *entry; unsigned int i; EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) { entry = &siena_parttbl[i]; if (entry->port == emip->emi_port && entry->nvtype == type) return (entry); } return (NULL); } #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t siena_nvram_test( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); siena_parttbl_entry_t *entry; unsigned int i; - int rc; + efx_rc_t rc; /* * Iterate over the list of supported partition types * applicable to *this* port */ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) { entry = &siena_parttbl[i]; if (entry->port != emip->emi_port || !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn))) continue; if ((rc = efx_mcdi_nvram_test(enp, entry->partn)) != 0) { goto fail1; } } return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ - __checkReturn int + __checkReturn efx_rc_t siena_nvram_size( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *sizep) { siena_parttbl_entry_t *entry; - int rc; + efx_rc_t rc; if ((entry = siena_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = siena_nvram_partn_size(enp, entry->partn, sizep)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); *sizep = 0; return (rc); } #define SIENA_DYNAMIC_CFG_SIZE(_nitems) \ (sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) * \ sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0]))) - __checkReturn int + __checkReturn efx_rc_t 
siena_nvram_get_dynamic_cfg( __in efx_nic_t *enp, __in unsigned int partn, __in boolean_t vpd, __out siena_mc_dynamic_config_hdr_t **dcfgp, __out size_t *sizep) { siena_mc_dynamic_config_hdr_t *dcfg = NULL; size_t size; uint8_t cksum; unsigned int vpd_offset; unsigned int vpd_length; unsigned int hdr_length; unsigned int nversions; unsigned int pos; unsigned int region; - int rc; + efx_rc_t rc; EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 || partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1); /* * Allocate sufficient memory for the entire dynamiccfg area, even * if we're not actually going to read in the VPD. */ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0) goto fail1; EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg); if (dcfg == NULL) { rc = ENOMEM; goto fail2; } if ((rc = siena_nvram_partn_read(enp, partn, 0, (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0) goto fail3; /* Verify the magic */ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0) != SIENA_MC_DYNAMIC_CONFIG_MAGIC) goto invalid1; /* All future versions of the structure must be backwards compatible */ EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0); hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0); nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0); vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0); vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0); /* Verify the hdr doesn't overflow the partn size */ if (hdr_length > size || vpd_offset > size || vpd_length > size || vpd_length + vpd_offset > size) goto invalid2; /* Verify the header has room for all its versions */ if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) || hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions)) goto invalid3; /* * Read the remaining portion of the dcfg, either including * the whole of VPD (there is no vpd length in this structure, * so we have to parse each tag), or just the dcfg header itself */ region = vpd ? vpd_offset + vpd_length : hdr_length; if (region > SIENA_NVRAM_CHUNK) { if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK, (caddr_t)dcfg + SIENA_NVRAM_CHUNK, region - SIENA_NVRAM_CHUNK)) != 0) goto fail4; } /* Verify checksum */ cksum = 0; for (pos = 0; pos < hdr_length; pos++) cksum += ((uint8_t *)dcfg)[pos]; if (cksum != 0) goto invalid4; goto done; invalid4: EFSYS_PROBE(invalid4); invalid3: EFSYS_PROBE(invalid3); invalid2: EFSYS_PROBE(invalid2); invalid1: EFSYS_PROBE(invalid1); /* * Construct a new "null" dcfg, with an empty version vector, * and an empty VPD chunk trailing. This has the neat side effect * of testing the exception paths in the write path.
*/ EFX_POPULATE_DWORD_1(dcfg->magic, EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC); EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg)); EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0, SIENA_MC_DYNAMIC_CONFIG_VERSION); EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, sizeof (*dcfg)); EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0); EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0); done: *dcfgp = dcfg; *sizep = size; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, size, dcfg); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_get_subtype( __in efx_nic_t *enp, __in unsigned int partn, __out uint32_t *subtypep) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN, MC_CMD_GET_BOARD_CFG_OUT_LENMAX)]; efx_word_t *fw_list; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_BOARD_CFG; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMAX; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST + (partn + 1) * sizeof (efx_word_t)) { rc = ENOENT; goto fail3; } fw_list = MCDI_OUT2(req, efx_word_t, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); *subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_get_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]) { siena_mc_dynamic_config_hdr_t *dcfg; siena_parttbl_entry_t *entry; unsigned int dcfg_partn; unsigned int partn; unsigned int i; - int rc; + efx_rc_t rc; if ((entry = siena_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } partn = entry->partn; if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) { rc = ENOTSUP; goto fail2; } if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0) goto fail3; /* * Some partitions are accessible from both ports (for instance BOOTROM) * Find the highest version reported by all dcfg structures on ports * that have access to this partition. */ version[0] = version[1] = version[2] = version[3] = 0; for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) { unsigned int nitems; uint16_t temp[4]; size_t length; entry = &siena_parttbl[i]; if (entry->partn != partn) continue; dcfg_partn = (entry->port == 1) ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; /* * Ignore missing partitions on port 2, assuming they're due * to running on a single port part.
*/ if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) { if (entry->port == 2) continue; } if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn, B_FALSE, &dcfg, &length)) != 0) goto fail4; nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0); if (nitems < entry->partn) goto done; temp[0] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_w, EFX_WORD_0); temp[1] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_x, EFX_WORD_0); temp[2] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_y, EFX_WORD_0); temp[3] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_z, EFX_WORD_0); if (memcmp(version, temp, sizeof (temp)) < 0) memcpy(version, temp, sizeof (temp)); done: EFSYS_KMEM_FREE(enp->en_esip, length, dcfg); } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_rw_start( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *chunk_sizep) { siena_parttbl_entry_t *entry; - int rc; + efx_rc_t rc; if ((entry = siena_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = siena_nvram_partn_lock(enp, entry->partn)) != 0) goto fail2; if (chunk_sizep != NULL) *chunk_sizep = SIENA_NVRAM_CHUNK; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_read_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { siena_parttbl_entry_t *entry; - int rc; + efx_rc_t rc; if ((entry = siena_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = siena_nvram_partn_read(enp, entry->partn, offset, data, size)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_erase( __in efx_nic_t *enp, __in efx_nvram_type_t type) { siena_parttbl_entry_t *entry; size_t size; - int rc; + efx_rc_t rc; if ((entry = siena_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = siena_nvram_partn_size(enp, entry->partn, &size)) != 0) goto fail2; if ((rc = siena_nvram_partn_erase(enp, entry->partn, 0, size)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_write_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __in_bcount(size) caddr_t data, __in size_t size) { siena_parttbl_entry_t *entry; - int rc; + efx_rc_t rc; if ((entry = siena_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = siena_nvram_partn_write(enp, entry->partn, offset, data, size)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_nvram_rw_finish( __in efx_nic_t *enp, __in efx_nvram_type_t type) { siena_parttbl_entry_t *entry; if ((entry = siena_parttbl_entry(enp, type)) != NULL) siena_nvram_partn_unlock(enp, entry->partn); } - __checkReturn int + __checkReturn efx_rc_t siena_nvram_set_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_ecount(4) uint16_t version[4]) { siena_mc_dynamic_config_hdr_t 
*dcfg = NULL; siena_parttbl_entry_t *entry; unsigned int dcfg_partn; size_t partn_size; unsigned int hdr_length; unsigned int vpd_length; unsigned int vpd_offset; unsigned int nitems; unsigned int required_hdr_length; unsigned int pos; uint8_t cksum; uint32_t subtype; size_t length; - int rc; + efx_rc_t rc; if ((entry = siena_parttbl_entry(enp, type)) == NULL) { rc = ENOTSUP; goto fail1; } dcfg_partn = (entry->port == 1) ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0) goto fail2; if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0) goto fail2; if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn, B_TRUE, &dcfg, &length)) != 0) goto fail3; hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0); nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0); vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0); vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0); /* * NOTE: This function will blatt any fields trailing the version * vector, or the VPD chunk. */ required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(entry->partn + 1); if (required_hdr_length + vpd_length > length) { rc = ENOSPC; goto fail4; } if (vpd_offset < required_hdr_length) { (void) memmove((caddr_t)dcfg + required_hdr_length, (caddr_t)dcfg + vpd_offset, vpd_length); vpd_offset = required_hdr_length; EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, vpd_offset); } if (hdr_length < required_hdr_length) { (void) memset((caddr_t)dcfg + hdr_length, 0, required_hdr_length - hdr_length); hdr_length = required_hdr_length; EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, hdr_length); } /* Get the subtype to insert into the fw_subtype array */ if ((rc = siena_nvram_get_subtype(enp, entry->partn, &subtype)) != 0) goto fail5; /* Fill out the new version */ EFX_POPULATE_DWORD_1(dcfg->fw_version[entry->partn].fw_subtype, EFX_DWORD_0, subtype); EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_w, EFX_WORD_0, version[0]); EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_x, EFX_WORD_0, version[1]); EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_y, EFX_WORD_0, version[2]); EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_z, EFX_WORD_0, version[3]); /* Update the version count */ if (nitems < entry->partn + 1) { nitems = entry->partn + 1; EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, nitems); } /* Update the checksum */ cksum = 0; for (pos = 0; pos < hdr_length; pos++) cksum += ((uint8_t *)dcfg)[pos]; dcfg->csum.eb_u8[0] -= cksum; /* Erase and write the new partition */ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0) goto fail6; /* Write out the new structure to nvram */ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg, vpd_offset + vpd_length)) != 0) goto fail7; EFSYS_KMEM_FREE(enp->en_esip, length, dcfg); siena_nvram_partn_unlock(enp, dcfg_partn); return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); EFSYS_KMEM_FREE(enp->en_esip, length, dcfg); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_NVRAM */ #endif /* EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/siena_phy.c =================================================================== --- stable/10/sys/dev/sfxge/common/siena_phy.c (revision 293926) +++ 
stable/10/sys/dev/sfxge/common/siena_phy.c (revision 293927) @@ -1,842 +1,842 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA static void siena_phy_decode_cap( __in uint32_t mcdi_cap, __out uint32_t *maskp) { uint32_t mask; mask = 0; if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) mask |= (1 << EFX_PHY_CAP_10HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) mask |= (1 << EFX_PHY_CAP_100HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) mask |= (1 << EFX_PHY_CAP_100FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) mask |= (1 << EFX_PHY_CAP_1000HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_1000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) mask |= (1 << EFX_PHY_CAP_PAUSE); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) mask |= (1 << EFX_PHY_CAP_ASYM); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) mask |= (1 << EFX_PHY_CAP_AN); *maskp = mask; } static void siena_phy_decode_link_mode( __in efx_nic_t *enp, __in uint32_t link_flags, __in unsigned int speed, __in unsigned int fcntl, __out efx_link_mode_t *link_modep, __out unsigned int *fcntlp) { boolean_t fd = !!(link_flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); boolean_t up = !!(link_flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); _NOTE(ARGUNUSED(enp)) if (!up) *link_modep = EFX_LINK_DOWN; else if (speed == 10000 && fd) *link_modep = EFX_LINK_10000FDX; else if (speed == 1000) *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX; else if (speed == 100) *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX; else if (speed == 10) *link_modep = fd ? 
EFX_LINK_10FDX : EFX_LINK_10HDX; else *link_modep = EFX_LINK_UNKNOWN; if (fcntl == MC_CMD_FCNTL_OFF) *fcntlp = 0; else if (fcntl == MC_CMD_FCNTL_RESPOND) *fcntlp = EFX_FCNTL_RESPOND; else if (fcntl == MC_CMD_FCNTL_BIDIR) *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; else { EFSYS_PROBE1(mc_pcol_error, int, fcntl); *fcntlp = 0; } } void siena_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep) { efx_port_t *epp = &(enp->en_port); unsigned int link_flags; unsigned int speed; unsigned int fcntl; efx_link_mode_t link_mode; uint32_t lp_cap_mask; /* * Convert the LINKCHANGE speed enumeration into mbit/s, in the * same way as GET_LINK encodes the speed */ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) { case MCDI_EVENT_LINKCHANGE_SPEED_100M: speed = 100; break; case MCDI_EVENT_LINKCHANGE_SPEED_1G: speed = 1000; break; case MCDI_EVENT_LINKCHANGE_SPEED_10G: speed = 10000; break; default: speed = 0; break; } link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS); siena_phy_decode_link_mode(enp, link_flags, speed, MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL), &link_mode, &fcntl); siena_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP), &lp_cap_mask); /* * It's safe to update ep_lp_cap_mask without the driver's port lock * because presumably any concurrently running efx_port_poll() is * only going to arrive at the same value. * * ep_fcntl has two meanings. It's either the link common fcntl * (if the PHY supports AN), or it's the forced link state. If * the former, it's safe to update the value for the same reason as * for ep_lp_cap_mask. If the latter, then just ignore the value, * because we can race with efx_mac_fcntl_set(). */ epp->ep_lp_cap_mask = lp_cap_mask; if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) epp->ep_fcntl = fcntl; *link_modep = link_mode; } - __checkReturn int + __checkReturn efx_rc_t siena_phy_power( __in efx_nic_t *enp, __in boolean_t power) { - int rc; + efx_rc_t rc; if (!power) return (0); /* Check if the PHY is a zombie */ if ((rc = siena_phy_verify(enp)) != 0) goto fail1; enp->en_reset_flags |= EFX_RESET_PHY; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_phy_get_link( __in efx_nic_t *enp, __out siena_link_state_t *slsp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN, MC_CMD_GET_LINK_OUT_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LINK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LINK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) { rc = EMSGSIZE; goto fail2; } siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP), &slsp->sls_adv_cap_mask); siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP), &slsp->sls_lp_cap_mask); siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS), MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED), MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL), &slsp->sls_link_mode, &slsp->sls_fcntl); #if EFSYS_OPT_LOOPBACK /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); 
EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE); #endif /* EFSYS_OPT_LOOPBACK */ slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_phy_reconfigure( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mcdi_req_t req; uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN, MC_CMD_SET_ID_LED_OUT_LEN), MAX(MC_CMD_SET_LINK_IN_LEN, MC_CMD_SET_LINK_OUT_LEN))]; uint32_t cap_mask; unsigned int led_mode; unsigned int speed; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_LINK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_LINK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN; cap_mask = epp->ep_adv_cap_mask; MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP, PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1, PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1, PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1, PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1, PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1, PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1, PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1, PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1, PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1, PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1); #if EFSYS_OPT_LOOPBACK MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, epp->ep_loopback_type); switch (epp->ep_loopback_link_mode) { case EFX_LINK_100FDX: speed = 100; break; case EFX_LINK_1000FDX: speed = 1000; break; case EFX_LINK_10000FDX: speed = 10000; break; default: speed = 0; } #else MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE); speed = 0; #endif /* EFSYS_OPT_LOOPBACK */ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed); #if EFSYS_OPT_PHY_FLAGS MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags); #else MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0); #endif /* EFSYS_OPT_PHY_FLAGS */ efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } /* And set the blink mode */ (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_ID_LED; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN; #if EFSYS_OPT_PHY_LED_CONTROL switch (epp->ep_phy_led_mode) { case EFX_PHY_LED_DEFAULT: led_mode = MC_CMD_LED_DEFAULT; break; case EFX_PHY_LED_OFF: 
led_mode = MC_CMD_LED_OFF; break; case EFX_PHY_LED_ON: led_mode = MC_CMD_LED_ON; break; default: EFSYS_ASSERT(0); led_mode = MC_CMD_LED_DEFAULT; } MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode); #else MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_phy_verify( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN, MC_CMD_GET_PHY_STATE_OUT_LEN)]; uint32_t state; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PHY_STATE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) { rc = EMSGSIZE; goto fail2; } state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE); if (state != MC_CMD_PHY_STATE_OK) { if (state != MC_CMD_PHY_STATE_ZOMBIE) EFSYS_PROBE1(mc_pcol_error, int, state); rc = ENOTACTIVE; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip) { _NOTE(ARGUNUSED(enp, ouip)) return (ENOTSUP); } #if EFSYS_OPT_PHY_STATS #define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \ _mc_record, _efx_record) \ if ((_vmask) & (1ULL << (_mc_record))) { \ (_smask) |= (1ULL << (_efx_record)); \ if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \ efx_dword_t dword; \ EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\ (_stat)[_efx_record] = \ EFX_DWORD_FIELD(dword, EFX_DWORD_0); \ } \ } #define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \ SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \ MC_CMD_ ## _record, \ EFX_PHY_STAT_ ## _record) void siena_phy_decode_stats( __in efx_nic_t *enp, __in uint32_t vmask, __in_opt efsys_mem_t *esmp, __out_opt uint64_t *smaskp, __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat) { uint64_t smask = 0; _NOTE(ARGUNUSED(enp)) SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT); if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) { smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) | (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) | (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) | (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D)); if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) { efx_dword_t dword; uint32_t sig; EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL, &dword); sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0); stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1; stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1; stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1; stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1; } } SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A, EFX_PHY_STAT_SNR_A); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B, EFX_PHY_STAT_SNR_B); 
SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C, EFX_PHY_STAT_SNR_C); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D, EFX_PHY_STAT_SNR_D); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP, EFX_PHY_STAT_PHY_XS_LINK_UP); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT, EFX_PHY_STAT_PHY_XS_RX_FAULT); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT, EFX_PHY_STAT_PHY_XS_TX_FAULT); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN, EFX_PHY_STAT_PHY_XS_ALIGN); if (vmask & (1 << MC_CMD_PHYXS_SYNC)) { smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) | (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) | (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) | (1 << EFX_PHY_STAT_PHY_XS_SYNC_D)); if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) { efx_dword_t dword; uint32_t sync; EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword); sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0); stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1; stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1; stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1; stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1; } } SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP, EFX_PHY_STAT_CL22EXT_LINK_UP); if (smaskp != NULL) *smaskp = smask; } - __checkReturn int + __checkReturn efx_rc_t siena_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t vmask = encp->enc_mcdi_phy_stat_mask; uint64_t smask; efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN, MC_CMD_PHY_STATS_OUT_DMA_LEN)]; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_PHY_STATS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_PHY_STATS_OUT_DMA_LEN; MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO, EFSYS_MEM_ADDR(esmp) & 0xffffffff); MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI, EFSYS_MEM_ADDR(esmp) >> 32); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN); siena_phy_decode_stats(enp, vmask, esmp, &smask, stat); EFSYS_ASSERT(smask == encp->enc_phy_stat_mask); return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_PHY_PROPS #if EFSYS_OPT_NAMES extern const char * siena_phy_prop_name( __in efx_nic_t *enp, __in unsigned int id) { _NOTE(ARGUNUSED(enp, id)) return (NULL); } #endif /* EFSYS_OPT_NAMES */ -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_prop_get( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t flags, __out uint32_t *valp) { _NOTE(ARGUNUSED(enp, id, flags, valp)) return (ENOTSUP); } -extern __checkReturn int +extern __checkReturn efx_rc_t siena_phy_prop_set( __in efx_nic_t *enp, __in unsigned int id, __in uint32_t val) { _NOTE(ARGUNUSED(enp, id, val)) return (ENOTSUP); } #endif /* 
EFSYS_OPT_PHY_PROPS */ #if EFSYS_OPT_BIST - __checkReturn int + __checkReturn efx_rc_t siena_phy_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type) { - int rc; + efx_rc_t rc; if ((rc = efx_mcdi_bist_start(enp, type)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn unsigned long siena_phy_sft9001_bist_status( __in uint16_t code) { switch (code) { case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY: return (EFX_PHY_CABLE_STATUS_BUSY); case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT: return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT); case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT: return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT); case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN: return (EFX_PHY_CABLE_STATUS_OPEN); case MC_CMD_POLL_BIST_SFT9001_PAIR_OK: return (EFX_PHY_CABLE_STATUS_OK); default: return (EFX_PHY_CABLE_STATUS_INVALID); } } - __checkReturn int + __checkReturn efx_rc_t siena_phy_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN, MCDI_CTL_SDU_LEN_MAX)]; uint32_t value_mask = 0; efx_mcdi_req_t req; uint32_t result; - int rc; + efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_POLL_BIST; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MCDI_CTL_SDU_LEN_MAX; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) { rc = EMSGSIZE; goto fail2; } if (count > 0) (void) memset(valuesp, '\0', count * sizeof (unsigned long)); result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT); /* Extract PHY specific results */ if (result == MC_CMD_POLL_BIST_PASSED && encp->enc_phy_type == EFX_PHY_SFT9001B && req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN && (type == EFX_BIST_TYPE_PHY_CABLE_SHORT || type == EFX_BIST_TYPE_PHY_CABLE_LONG)) { uint16_t word; if (count > EFX_BIST_PHY_CABLE_LENGTH_A) { if (valuesp != NULL) valuesp[EFX_BIST_PHY_CABLE_LENGTH_A] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_A); } if (count > EFX_BIST_PHY_CABLE_LENGTH_B) { if (valuesp != NULL) valuesp[EFX_BIST_PHY_CABLE_LENGTH_B] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B); value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_B); } if (count > EFX_BIST_PHY_CABLE_LENGTH_C) { if (valuesp != NULL) valuesp[EFX_BIST_PHY_CABLE_LENGTH_C] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C); value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_C); } if (count > EFX_BIST_PHY_CABLE_LENGTH_D) { if (valuesp != NULL) valuesp[EFX_BIST_PHY_CABLE_LENGTH_D] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D); value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_D); } if (count > EFX_BIST_PHY_CABLE_STATUS_A) { if (valuesp != NULL) { word = MCDI_OUT_WORD(req, POLL_BIST_OUT_SFT9001_CABLE_STATUS_A); valuesp[EFX_BIST_PHY_CABLE_STATUS_A] = siena_phy_sft9001_bist_status(word); } value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_A); } if (count > EFX_BIST_PHY_CABLE_STATUS_B) { if (valuesp != NULL) { word = MCDI_OUT_WORD(req, POLL_BIST_OUT_SFT9001_CABLE_STATUS_B); valuesp[EFX_BIST_PHY_CABLE_STATUS_B] = 
siena_phy_sft9001_bist_status(word); } value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_B); } if (count > EFX_BIST_PHY_CABLE_STATUS_C) { if (valuesp != NULL) { word = MCDI_OUT_WORD(req, POLL_BIST_OUT_SFT9001_CABLE_STATUS_C); valuesp[EFX_BIST_PHY_CABLE_STATUS_C] = siena_phy_sft9001_bist_status(word); } value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_C); } if (count > EFX_BIST_PHY_CABLE_STATUS_D) { if (valuesp != NULL) { word = MCDI_OUT_WORD(req, POLL_BIST_OUT_SFT9001_CABLE_STATUS_D); valuesp[EFX_BIST_PHY_CABLE_STATUS_D] = siena_phy_sft9001_bist_status(word); } value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_D); } } else if (result == MC_CMD_POLL_BIST_FAILED && encp->enc_phy_type == EFX_PHY_QLX111V && req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN && count > EFX_BIST_FAULT_CODE) { if (valuesp != NULL) valuesp[EFX_BIST_FAULT_CODE] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST); value_mask |= 1 << EFX_BIST_FAULT_CODE; } if (value_maskp != NULL) *value_maskp = value_mask; EFSYS_ASSERT(resultp != NULL); if (result == MC_CMD_POLL_BIST_RUNNING) *resultp = EFX_BIST_RESULT_RUNNING; else if (result == MC_CMD_POLL_BIST_PASSED) *resultp = EFX_BIST_RESULT_PASSED; else *resultp = EFX_BIST_RESULT_FAILED; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_phy_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type) { /* There is no way to stop BIST on Siena */ _NOTE(ARGUNUSED(enp, type)) } #endif /* EFSYS_OPT_BIST */ #endif /* EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/siena_sram.c =================================================================== --- stable/10/sys/dev/sfxge/common/siena_sram.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/siena_sram.c (revision 293927) @@ -1,182 +1,182 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA void siena_sram_init( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_oword_t oword; uint32_t rx_base, tx_base; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); rx_base = encp->enc_buftbl_limit; tx_base = rx_base + (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE)); /* Initialize the transmit descriptor cache */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base); EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword); EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, EFX_TXQ_DC_SIZE); EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword); /* Initialize the receive descriptor cache */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base); EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword); EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, EFX_RXQ_DC_SIZE); EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword); /* Set receive descriptor pre-fetch low water mark */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56); EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword); /* Set the event queue to use for SRAM updates */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0); EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword); } #if EFSYS_OPT_DIAG - __checkReturn int + __checkReturn efx_rc_t siena_sram_test( __in efx_nic_t *enp, __in efx_sram_pattern_fn_t func) { efx_oword_t oword; efx_qword_t qword; efx_qword_t verify; size_t rows; unsigned int wptr; unsigned int rptr; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); /* Reconfigure into HALF buffer table mode */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0); EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword); /* * Move the descriptor caches up to the top of SRAM, and test * all of SRAM below them. We only miss out one row here. */ rows = SIENA_SRAM_ROWS - 1; EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows); EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword); EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1); EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword); /* * Write the pattern through BUF_HALF_TBL. Write * in 64 entry batches, waiting 1us in between each batch * to guarantee not to overflow the SRAM fifo */ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) { func(wptr, B_FALSE, &qword); EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword); if ((wptr - rptr) < 64 && wptr < rows - 1) continue; EFSYS_SPIN(1); for (; rptr <= wptr; ++rptr) { func(rptr, B_FALSE, &qword); EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr, &verify); if (!EFX_QWORD_IS_EQUAL(verify, qword)) { rc = EFAULT; goto fail1; } } } /* And do the same negated */ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) { func(wptr, B_TRUE, &qword); EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword); if ((wptr - rptr) < 64 && wptr < rows - 1) continue; EFSYS_SPIN(1); for (; rptr <= wptr; ++rptr) { func(rptr, B_TRUE, &qword); EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr, &verify); if (!EFX_QWORD_IS_EQUAL(verify, qword)) { rc = EFAULT; goto fail2; } } } /* Restore back to FULL buffer table mode */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1); EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword); /* * We don't need to reconfigure SRAM again because the API * requires efx_nic_fini() to be called after an sram test. 
*/ return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Restore back to FULL buffer table mode */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1); EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword); return (rc); } #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_SIENA */ Index: stable/10/sys/dev/sfxge/common/siena_vpd.c =================================================================== --- stable/10/sys/dev/sfxge/common/siena_vpd.c (revision 293926) +++ stable/10/sys/dev/sfxge/common/siena_vpd.c (revision 293927) @@ -1,613 +1,613 @@ /*- * Copyright (c) 2009-2015 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_VPD #if EFSYS_OPT_SIENA -static __checkReturn int +static __checkReturn efx_rc_t siena_vpd_get_static( __in efx_nic_t *enp, __in unsigned int partn, __deref_out_bcount_opt(*sizep) caddr_t *svpdp, __out size_t *sizep) { siena_mc_static_config_hdr_t *scfg; caddr_t svpd; size_t size; uint8_t cksum; unsigned int vpd_offset; unsigned int vpd_length; unsigned int hdr_length; unsigned int pos; unsigned int region; - int rc; + efx_rc_t rc; EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 || partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1); /* Allocate sufficient memory for the entire static cfg area */ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0) goto fail1; EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg); if (scfg == NULL) { rc = ENOMEM; goto fail2; } if ((rc = siena_nvram_partn_read(enp, partn, 0, (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0) goto fail3; /* Verify the magic number */ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) != SIENA_MC_STATIC_CONFIG_MAGIC) { rc = EINVAL; goto fail4; } /* All future versions of the structure must be backwards compatable */ EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0); hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0); vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0); vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0); /* Verify the hdr doesn't overflow the sector size */ if (hdr_length > size || vpd_offset > size || vpd_length > size || vpd_length + vpd_offset > size) { rc = EINVAL; goto fail5; } /* Read the remainder of scfg + static vpd */ region = vpd_offset + vpd_length; if (region > SIENA_NVRAM_CHUNK) { if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK, (caddr_t)scfg + SIENA_NVRAM_CHUNK, region - SIENA_NVRAM_CHUNK)) != 0) goto fail6; } /* Verify checksum */ cksum = 0; for (pos = 0; pos < hdr_length; pos++) cksum += ((uint8_t *)scfg)[pos]; if (cksum != 0) { rc = EINVAL; goto fail7; } if (vpd_length == 0) svpd = NULL; else { /* Copy the vpd data out */ EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd); if (svpd == NULL) { rc = ENOMEM; goto fail8; } memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length); } EFSYS_KMEM_FREE(enp->en_esip, size, scfg); *svpdp = svpd; *sizep = vpd_length; return (0); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, size, scfg); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_init( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); caddr_t svpd = NULL; unsigned partn; size_t size = 0; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); partn = (emip->emi_port == 1) ? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 : MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1; /* * We need the static VPD sector to present a unified static+dynamic * VPD, that is, basically on every read, write, verify cycle. Since * it should *never* change we can just cache it here. 
*/ if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0) goto fail1; if (svpd != NULL && size > 0) { if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0) goto fail2; } enp->en_u.siena.enu_svpd = svpd; enp->en_u.siena.enu_svpd_length = size; return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, size, svpd); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_size( __in efx_nic_t *enp, __out size_t *sizep) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); unsigned int partn; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); /* * This function returns the total size the user should allocate * for all VPD operations. We've already cached the static vpd, * so we just need to return an upper bound on the dynamic vpd. * Since the dynamic_config structure can change under our feet, * (as version numbers are inserted), just be safe and return the * total size of the dynamic_config *sector* */ partn = (emip->emi_port == 1) ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0) goto fail1; return (0); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); siena_mc_dynamic_config_hdr_t *dcfg = NULL; unsigned int vpd_length; unsigned int vpd_offset; unsigned int dcfg_partn; size_t dcfg_size; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); dcfg_partn = (emip->emi_port == 1) ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn, B_TRUE, &dcfg, &dcfg_size)) != 0) goto fail1; vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0); vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0); if (vpd_length > size) { rc = EFAULT; /* Invalid dcfg: header bigger than sector */ goto fail2; } EFSYS_ASSERT3U(vpd_length, <=, size); memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length); /* Pad data with all-1s, consistent with update operations */ memset(data + vpd_length, 0xff, size - vpd_length); EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg); return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { efx_vpd_tag_t stag; efx_vpd_tag_t dtag; efx_vpd_keyword_t skey; efx_vpd_keyword_t dkey; unsigned int scont; unsigned int dcont; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); /* * Strictly you could take the view that dynamic vpd is optional. * Instead, to conform more closely to the read/verify/reinit() * paradigm, we require dynamic vpd. siena_vpd_reinit() will * reinitialize it as required. */ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0) goto fail1; /* * Verify that there is no duplication between the static and * dynamic cfg sectors. 
*/ if (enp->en_u.siena.enu_svpd_length == 0) goto done; dcont = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_hunk_next(data, size, &dtag, &dkey, NULL, NULL, &dcont)) != 0) goto fail2; if (dcont == 0) break; scont = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_hunk_next( enp->en_u.siena.enu_svpd, enp->en_u.siena.enu_svpd_length, &stag, &skey, NULL, NULL, &scont)) != 0) goto fail3; if (scont == 0) break; if (stag == dtag && skey == dkey) { rc = EEXIST; goto fail4; } } } done: return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { boolean_t wantpid; - int rc; + efx_rc_t rc; /* * Only create a PID if the dynamic cfg doesn't have one */ if (enp->en_u.siena.enu_svpd_length == 0) wantpid = B_TRUE; else { unsigned int offset; uint8_t length; rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd, enp->en_u.siena.enu_svpd_length, EFX_VPD_ID, 0, &offset, &length); if (rc == 0) wantpid = B_FALSE; else if (rc == ENOENT) wantpid = B_TRUE; else goto fail1; } if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp) { unsigned int offset; uint8_t length; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); /* Attempt to satisfy the request from svpd first */ if (enp->en_u.siena.enu_svpd_length > 0) { if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd, enp->en_u.siena.enu_svpd_length, evvp->evv_tag, evvp->evv_keyword, &offset, &length)) == 0) { evvp->evv_length = length; memcpy(evvp->evv_value, enp->en_u.siena.enu_svpd + offset, length); return (0); } else if (rc != ENOENT) goto fail1; } /* And then from the provided data buffer */ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag, evvp->evv_keyword, &offset, &length)) != 0) goto fail2; evvp->evv_length = length; memcpy(evvp->evv_value, data + offset, length); return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_set( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp) { - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); /* If the provided (tag,keyword) exists in svpd, then it is readonly */ if (enp->en_u.siena.enu_svpd_length > 0) { unsigned int offset; uint8_t length; if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd, enp->en_u.siena.enu_svpd_length, evvp->evv_tag, evvp->evv_keyword, &offset, &length)) == 0) { rc = EACCES; goto fail1; } } if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_next( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp) { _NOTE(ARGUNUSED(enp, data, size, evvp, contp)) return (ENOTSUP); } - __checkReturn int + __checkReturn efx_rc_t siena_vpd_write( __in 
efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); siena_mc_dynamic_config_hdr_t *dcfg = NULL; unsigned int vpd_offset; unsigned int dcfg_partn; unsigned int hdr_length; unsigned int pos; uint8_t cksum; size_t partn_size, dcfg_size; size_t vpd_length; - int rc; + efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); /* Determine total length of all tags */ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0) goto fail1; /* Lock dynamic config sector for write, and read structure only */ dcfg_partn = (emip->emi_port == 1) ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0) goto fail2; if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0) goto fail3; if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn, B_FALSE, &dcfg, &dcfg_size)) != 0) goto fail4; hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0); /* Allocated memory should have room for the new VPD */ if (hdr_length + vpd_length > dcfg_size) { rc = ENOSPC; goto fail5; } /* Copy in new vpd and update header */ vpd_offset = dcfg_size - vpd_length; EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, vpd_offset); memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length); EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, vpd_length); /* Update the checksum */ cksum = 0; for (pos = 0; pos < hdr_length; pos++) cksum += ((uint8_t *)dcfg)[pos]; dcfg->csum.eb_u8[0] -= cksum; /* Erase and write the new sector */ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0) goto fail6; /* Write out the new structure to nvram */ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg, vpd_offset + vpd_length)) != 0) goto fail7; EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg); siena_nvram_partn_unlock(enp, dcfg_partn); return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg); fail4: EFSYS_PROBE(fail4); siena_nvram_partn_unlock(enp, dcfg_partn); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE1(fail1, int, rc); + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_vpd_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); if (enp->en_u.siena.enu_svpd_length > 0) { EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length, enp->en_u.siena.enu_svpd); enp->en_u.siena.enu_svpd = NULL; enp->en_u.siena.enu_svpd_length = 0; } } #endif /* EFSYS_OPT_SIENA */ #endif /* EFSYS_OPT_VPD */ Index: stable/10 =================================================================== --- stable/10 (revision 293926) +++ stable/10 (revision 293927) Property changes on: stable/10 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r291436
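The substance of this revision is a mechanical type change: each siena_*() routine now declares its return value, and the argument passed to its fail-path probe, as efx_rc_t rather than int. All of these routines share the same goto-fail unwind idiom, in which every failure exit sets rc, jumps to a numbered label, and fires an EFSYS_PROBE so the exact exit path can be traced. Below is a minimal standalone sketch of that idiom; the efx_rc_t typedef and the EFSYS_PROBE macro bodies are stand-ins for the real efsys.h definitions, not the driver's own.

/* Minimal standalone sketch of the goto-fail/EFSYS_PROBE unwind idiom;
 * the macro bodies and the efx_rc_t typedef here are stand-ins. */
#include <errno.h>
#include <stdio.h>

typedef int efx_rc_t;               /* assumption: an errno-style integer type */

#define EFSYS_PROBE(_name)          printf("probe: %s\n", #_name)
#define EFSYS_PROBE1(_name, _t, _v) printf("probe: %s (%s=%d)\n", #_name, #_t, (int)(_v))

static efx_rc_t
example_op(int step_one_ok, int step_two_ok)
{
	efx_rc_t rc;

	if (!step_one_ok) {
		rc = ENOTSUP;
		goto fail1;
	}
	if (!step_two_ok) {
		rc = EIO;
		goto fail2;
	}
	return (0);

	/* Unwind in reverse order; each label fires a probe so a tracer
	 * can report which exit path was taken. */
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}

int
main(void)
{
	efx_rc_t rc = example_op(1, 0);	/* exercises the fail2 -> fail1 unwind */

	return (rc == EIO ? 0 : 1);
}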
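siena_nvram_get_version() walks every partition-table entry that maps onto the same partition and keeps the highest four-component version it finds across the per-port dynamic config structures. The driver compares the raw uint16_t[4] arrays with memcmp(); the standalone sketch below expresses the same keep-the-maximum intent with an explicit component-wise comparison, so treat it as an illustration of the idea rather than a byte-exact copy of the driver logic.

/* Sketch only: pick the highest (w, x, y, z) version seen so far,
 * comparing components most-significant first. */
#include <stdint.h>
#include <string.h>
#include <assert.h>

static int
version_less(const uint16_t a[4], const uint16_t b[4])
{
	unsigned int i;

	for (i = 0; i < 4; i++) {
		if (a[i] != b[i])
			return (a[i] < b[i]);
	}
	return (0);
}

static void
version_keep_max(uint16_t version[4], const uint16_t candidate[4])
{
	if (version_less(version, candidate))
		memcpy(version, candidate, 4 * sizeof (uint16_t));
}

int
main(void)
{
	uint16_t best[4] = { 0, 0, 0, 0 };
	const uint16_t port1[4] = { 4, 2, 1, 0 };
	const uint16_t port2[4] = { 4, 3, 0, 0 };

	version_keep_max(best, port1);
	version_keep_max(best, port2);
	assert(best[0] == 4 && best[1] == 3 && best[2] == 0 && best[3] == 0);
	return (0);
}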
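Both siena_nvram_set_version() and siena_vpd_write() finish by rebalancing the header checksum: the bytes of the header must sum to zero (mod 256), so after editing the fields the code sums the header and subtracts that sum from the stored checksum byte. A small self-contained demonstration of why that works, using an invented header layout:

/* Zero-sum checksum rebalance: after editing the header, subtract the
 * byte-wise sum from the checksum byte so the total returns to 0 (mod 256).
 * The struct layout is illustrative only. */
#include <stdint.h>
#include <string.h>
#include <assert.h>

struct fake_hdr {
	uint8_t  csum;
	uint8_t  version;
	uint16_t length;
	uint32_t payload[4];
};

static void
hdr_rebalance_csum(struct fake_hdr *hdr, size_t hdr_length)
{
	uint8_t cksum = 0;
	size_t pos;

	for (pos = 0; pos < hdr_length; pos++)
		cksum += ((uint8_t *)hdr)[pos];
	hdr->csum -= cksum;	/* the header bytes now sum to 0 again */
}

int
main(void)
{
	struct fake_hdr hdr;
	uint8_t sum = 0;
	size_t pos;

	memset(&hdr, 0, sizeof (hdr));
	hdr.version = 3;		/* arbitrary edits */
	hdr.payload[0] = 0x12345678;
	hdr_rebalance_csum(&hdr, sizeof (hdr));

	for (pos = 0; pos < sizeof (hdr); pos++)
		sum += ((uint8_t *)&hdr)[pos];
	assert(sum == 0);
	return (0);
}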
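The MCDI calls in siena_phy.c, and siena_nvram_get_subtype() above, all follow one request shape: a single payload buffer sized for the larger of the request and response, an execute step, then checks of emr_rc and emr_out_length_used before any response field is read. The sketch below reproduces that shape against a fake execute routine; none of the type or function names here are the real efx_mcdi API.

/* Sketch of the MCDI request/validate pattern with stand-in types. */
#include <errno.h>
#include <stdint.h>
#include <string.h>

#ifndef MAX
#define MAX(_a, _b) ((_a) > (_b) ? (_a) : (_b))
#endif

#define FAKE_CMD_IN_LEN   4
#define FAKE_CMD_OUT_LEN  8

typedef int efx_rc_t;

typedef struct {
	unsigned int emr_cmd;
	uint8_t *emr_in_buf;
	size_t emr_in_length;
	uint8_t *emr_out_buf;
	size_t emr_out_length;
	int emr_rc;
	size_t emr_out_length_used;
} fake_mcdi_req_t;

/* Pretend firmware: fills a canned 8-byte response. */
static void
fake_mcdi_execute(fake_mcdi_req_t *req)
{
	memset(req->emr_out_buf, 0xab, FAKE_CMD_OUT_LEN);
	req->emr_rc = 0;
	req->emr_out_length_used = FAKE_CMD_OUT_LEN;
}

static efx_rc_t
fake_get_something(uint8_t *resultp)
{
	fake_mcdi_req_t req;
	uint8_t payload[MAX(FAKE_CMD_IN_LEN, FAKE_CMD_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = 42;
	req.emr_in_buf = payload;
	req.emr_in_length = FAKE_CMD_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = FAKE_CMD_OUT_LEN;

	fake_mcdi_execute(&req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}
	/* Never read response fields the firmware did not actually fill. */
	if (req.emr_out_length_used < FAKE_CMD_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}
	*resultp = payload[0];
	return (0);

fail2:
fail1:
	return (rc);
}

int
main(void)
{
	uint8_t v;

	return (fake_get_something(&v) == 0 && v == 0xab ? 0 : 1);
}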
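siena_sram_test() writes a pattern through the half buffer table in batches of up to 64 entries, pauses, reads each batch back for comparison, and then repeats the whole walk with the pattern negated. The sketch below keeps that batching structure but substitutes a plain array for the buffer table, an invented pattern function, and drops the real 1us pacing delay.

/* Batched write/verify walk over a fake SRAM array; the batching mirrors
 * the driver's pacing, the pattern function is invented. */
#include <stdint.h>
#include <stdio.h>

#define ROWS 1024u

static void
pattern_fn(unsigned int row, int negate, uint64_t *qwordp)
{
	uint64_t v = 0x0123456789abcdefULL ^ ((uint64_t)row * 0x9e3779b97f4a7c15ULL);

	*qwordp = negate ? ~v : v;
}

static int
sram_test_pass(uint64_t *sram, int negate)
{
	unsigned int wptr, rptr;
	uint64_t qword, verify;

	for (wptr = 0, rptr = 0; wptr < ROWS; ++wptr) {
		pattern_fn(wptr, negate, &qword);
		sram[wptr] = qword;		/* stands in for the table write */

		/* Verify after every 64 writes, or at the end of the pass. */
		if ((wptr - rptr) < 64 && wptr < ROWS - 1)
			continue;

		for (; rptr <= wptr; ++rptr) {
			pattern_fn(rptr, negate, &qword);
			verify = sram[rptr];	/* stands in for the table read */
			if (verify != qword)
				return (-1);
		}
	}
	return (0);
}

int
main(void)
{
	static uint64_t fake_sram[ROWS];

	if (sram_test_pass(fake_sram, 0) != 0 || sram_test_pass(fake_sram, 1) != 0) {
		fprintf(stderr, "sram pattern mismatch\n");
		return (1);
	}
	return (0);
}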
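siena_vpd_verify() rejects any (tag, keyword) pair in the writable dynamic VPD that already exists in the cached static VPD, since static entries are read-only. The real code iterates TLV hunks with efx_vpd_hunk_next(); the sketch below reduces both areas to flat arrays of pairs to show the overlap check in isolation.

/* Sketch of the static/dynamic VPD duplication check; the pair arrays
 * replace the real TLV hunk iteration. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uint8_t  tag;
	uint16_t keyword;
} vpd_pair_t;

static int
vpd_check_no_overlap(const vpd_pair_t *dyn, size_t ndyn,
    const vpd_pair_t *stat, size_t nstat)
{
	size_t d, s;

	for (d = 0; d < ndyn; d++) {
		for (s = 0; s < nstat; s++) {
			if (dyn[d].tag == stat[s].tag &&
			    dyn[d].keyword == stat[s].keyword)
				return (EEXIST);	/* static entries are read-only */
		}
	}
	return (0);
}

int
main(void)
{
	const vpd_pair_t stat[] = { { 0x82, 0 }, { 0x90, 'P' | ('N' << 8) } };
	const vpd_pair_t ok[]   = { { 0x90, 'S' | ('N' << 8) } };
	const vpd_pair_t bad[]  = { { 0x90, 'P' | ('N' << 8) } };

	if (vpd_check_no_overlap(ok, 1, stat, 2) != 0)
		return (1);
	if (vpd_check_no_overlap(bad, 1, stat, 2) != EEXIST)
		return (1);
	return (0);
}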