Index: projects/runtime-coverage/Makefile.inc1 =================================================================== --- projects/runtime-coverage/Makefile.inc1 (revision 325209) +++ projects/runtime-coverage/Makefile.inc1 (revision 325210) @@ -1,2936 +1,2936 @@ # # $FreeBSD$ # # Make command line options: # -DNO_CLEANDIR run ${MAKE} clean, instead of ${MAKE} cleandir # -DNO_CLEAN do not clean at all # -DDB_FROM_SRC use the user/group databases in src/etc instead of # the system database when installing. # -DNO_SHARE do not go into share subdir # -DKERNFAST define NO_KERNEL{CONFIG,CLEAN,OBJ} # -DNO_KERNELCONFIG do not run config in ${MAKE} buildkernel # -DNO_KERNELCLEAN do not run ${MAKE} clean in ${MAKE} buildkernel # -DNO_KERNELOBJ do not run ${MAKE} obj in ${MAKE} buildkernel # -DNO_PORTSUPDATE do not update ports in ${MAKE} update # -DNO_ROOT install without using root privilege # -DNO_DOCUPDATE do not update doc in ${MAKE} update # -DWITHOUT_CTF do not run the DTrace CTF conversion tools on built objects # LOCAL_DIRS="list of dirs" to add additional dirs to the SUBDIR list # LOCAL_ITOOLS="list of tools" to add additional tools to the ITOOLS list # LOCAL_LIB_DIRS="list of dirs" to add additional dirs to libraries target # LOCAL_MTREE="list of mtree files" to process to allow local directories # to be created before files are installed # LOCAL_TOOL_DIRS="list of dirs" to add additional dirs to the build-tools # list # LOCAL_XTOOL_DIRS="list of dirs" to add additional dirs to the # cross-tools target # METALOG="path to metadata log" to write permission and ownership # when NO_ROOT is set. (default: ${DESTDIR}/METALOG) # TARGET="machine" to crossbuild world for a different machine type # TARGET_ARCH= may be required when a TARGET supports multiple endians # BUILDENV_SHELL= shell to launch for the buildenv target (def:${SHELL}) # WORLD_FLAGS= additional flags to pass to make(1) during buildworld # KERNEL_FLAGS= additional flags to pass to make(1) during buildkernel # SUBDIR_OVERRIDE="list of dirs" to build rather than everything. # All libraries and includes, and some build tools will still build. # # The intended user-driven targets are: # buildworld - rebuild *everything*, including glue to help do upgrades # installworld- install everything built by "buildworld" # checkworld - run test suite on installed world # doxygen - build API documentation of the kernel # update - convenient way to update your source tree (eg: svn/svnup) # # Standard targets (not defined here) are documented in the makefiles in # /usr/share/mk. These include: # obj depend all install clean cleandepend cleanobj .if !defined(TARGET) || !defined(TARGET_ARCH) .error "Both TARGET and TARGET_ARCH must be defined." .endif SRCDIR?= ${.CURDIR} LOCALBASE?= /usr/local # Cross toolchain changes must be in effect before bsd.compiler.mk # so that gets the right CC, and pass CROSS_TOOLCHAIN to submakes. .if defined(CROSS_TOOLCHAIN) .include "${LOCALBASE}/share/toolchains/${CROSS_TOOLCHAIN}.mk" CROSSENV+=CROSS_TOOLCHAIN="${CROSS_TOOLCHAIN}" .endif .if defined(CROSS_TOOLCHAIN_PREFIX) CROSS_COMPILER_PREFIX?=${CROSS_TOOLCHAIN_PREFIX} .endif XCOMPILERS= CC CXX CPP .for COMPILER in ${XCOMPILERS} .if defined(CROSS_COMPILER_PREFIX) X${COMPILER}?= ${CROSS_COMPILER_PREFIX}${${COMPILER}} .else X${COMPILER}?= ${${COMPILER}} .endif .endfor # If a full path to an external cross compiler is given, don't build # a cross compiler. 
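# For example (illustrative values, not project defaults): with
# CROSS_COMPILER_PREFIX=/usr/local/bin/aarch64-freebsd- and CC=cc, the loop
# above yields XCC=/usr/local/bin/aarch64-freebsd-cc; because that is a full
# path, the :M/* check below disables the bootstrap compilers.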
.if ${XCC:N${CCACHE_BIN}:M/*}
MK_CLANG_BOOTSTRAP= no
MK_GCC_BOOTSTRAP= no
.endif
MAKEOBJDIRPREFIX?= /usr/obj
.if ${MACHINE} == ${TARGET} && ${MACHINE_ARCH} == ${TARGET_ARCH} && !defined(CROSS_BUILD_TESTING)
OBJTREE= ${MAKEOBJDIRPREFIX}
.else
OBJTREE= ${MAKEOBJDIRPREFIX}/${TARGET}.${TARGET_ARCH}
.endif
# Pull in compiler metadata from buildworld/toolchain if possible to avoid
# running CC from bsd.compiler.mk.
.if make(installworld) || make(install) || make(distributeworld) || \
make(stageworld)
.-include "${OBJTREE}${.CURDIR}/compiler-metadata.mk"
.endif
# Pull in COMPILER_TYPE and COMPILER_FREEBSD_VERSION early.
.include <bsd.compiler.mk>
.include "share/mk/src.opts.mk"
# Check if there is a local compiler that can serve as an external compiler.
# Which compiler is expected to be used?
.if ${MK_CLANG_BOOTSTRAP} == "yes"
WANT_COMPILER_TYPE= clang
.elif ${MK_GCC_BOOTSTRAP} == "yes"
WANT_COMPILER_TYPE= gcc
.else
WANT_COMPILER_TYPE=
.endif
.if !defined(WANT_COMPILER_FREEBSD_VERSION)
.if ${WANT_COMPILER_TYPE} == "clang"
WANT_COMPILER_FREEBSD_VERSION_FILE= lib/clang/freebsd_cc_version.h
WANT_COMPILER_FREEBSD_VERSION!= \
awk '$$2 == "FREEBSD_CC_VERSION" {printf("%d\n", $$3)}' \
${SRCDIR}/${WANT_COMPILER_FREEBSD_VERSION_FILE} || echo unknown
WANT_COMPILER_VERSION_FILE= lib/clang/include/clang/Basic/Version.inc
WANT_COMPILER_VERSION!= \
awk '$$2 == "CLANG_VERSION" {split($$3, a, "."); print a[1] * 10000 + a[2] * 100 + a[3]}' \
${SRCDIR}/${WANT_COMPILER_VERSION_FILE} || echo unknown
.elif ${WANT_COMPILER_TYPE} == "gcc"
WANT_COMPILER_FREEBSD_VERSION_FILE= gnu/usr.bin/cc/cc_tools/freebsd-native.h
WANT_COMPILER_FREEBSD_VERSION!= \
awk '$$2 == "FBSD_CC_VER" {printf("%d\n", $$3)}' \
${SRCDIR}/${WANT_COMPILER_FREEBSD_VERSION_FILE} || echo unknown
WANT_COMPILER_VERSION_FILE= contrib/gcc/BASE-VER
WANT_COMPILER_VERSION!= \
awk -F. '{print $$1 * 10000 + $$2 * 100 + $$3}' \
${SRCDIR}/${WANT_COMPILER_VERSION_FILE} || echo unknown
.endif
.export WANT_COMPILER_FREEBSD_VERSION WANT_COMPILER_TYPE WANT_COMPILER_VERSION
.endif # !defined(WANT_COMPILER_FREEBSD_VERSION)
# It needs to be the same revision as we would build for the bootstrap.
# If the expected compiler and CC differ then we can't skip.
# GCC cannot be used for cross-arch yet. For clang we pass -target later if
# TARGET_ARCH!=MACHINE_ARCH.
.if ${MK_SYSTEM_COMPILER} == "yes" && \
(${MK_CLANG_BOOTSTRAP} == "yes" || ${MK_GCC_BOOTSTRAP} == "yes") && \
- !make(showconfig) && !make(native-xtools) && !make(xdev*) && \
+ !make(showconfig) && !make(xdev*) && \
${WANT_COMPILER_TYPE} == ${COMPILER_TYPE} && \
(${COMPILER_TYPE} == "clang" || ${TARGET_ARCH} == ${MACHINE_ARCH}) && \
${COMPILER_VERSION} == ${WANT_COMPILER_VERSION} && \
${COMPILER_FREEBSD_VERSION} == ${WANT_COMPILER_FREEBSD_VERSION}
# Everything matches, disable the bootstrap compiler.
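# Worked example of the match (numbers illustrative): if Version.inc defines
# CLANG_VERSION as 5.0.0, the awk above computes WANT_COMPILER_VERSION as
# 5 * 10000 + 0 * 100 + 0 = 50000, so the bootstrap is skipped only when the
# host compiler reports COMPILER_VERSION 50000 and an equal
# COMPILER_FREEBSD_VERSION.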
MK_CLANG_BOOTSTRAP= no
MK_GCC_BOOTSTRAP= no
USING_SYSTEM_COMPILER= yes
.endif # ${WANT_COMPILER_TYPE} == ${COMPILER_TYPE}
USING_SYSTEM_COMPILER?= no
TEST_SYSTEM_COMPILER_VARS= \
USING_SYSTEM_COMPILER MK_SYSTEM_COMPILER \
MK_CROSS_COMPILER MK_CLANG_BOOTSTRAP MK_GCC_BOOTSTRAP \
WANT_COMPILER_TYPE WANT_COMPILER_VERSION WANT_COMPILER_VERSION_FILE \
WANT_COMPILER_FREEBSD_VERSION WANT_COMPILER_FREEBSD_VERSION_FILE \
CC COMPILER_TYPE COMPILER_FEATURES COMPILER_VERSION \
COMPILER_FREEBSD_VERSION \
LINKER_TYPE LINKER_VERSION
test-system-compiler: .PHONY
.for v in ${TEST_SYSTEM_COMPILER_VARS}
${_+_}@printf "%-35s= %s\n" "${v}" "${${v}}"
.endfor
.if ${USING_SYSTEM_COMPILER} == "yes" && \
(make(buildworld) || make(buildkernel) || make(kernel-toolchain) || \
make(toolchain) || make(_cross-tools))
.info SYSTEM_COMPILER: Determined that CC=${CC} matches the source tree. Not bootstrapping a cross-compiler.
.endif
# For installworld we need to ensure that the looked-up compiler metadata is
# passed along rather than trying to run cc from the restricted
# STRICTTMPPATH.
.if ${MK_CLANG_BOOTSTRAP} == "no" && ${MK_GCC_BOOTSTRAP} == "no"
.if !defined(X_COMPILER_TYPE)
CROSSENV+= COMPILER_VERSION=${COMPILER_VERSION} \
COMPILER_TYPE=${COMPILER_TYPE} \
COMPILER_FEATURES=${COMPILER_FEATURES} \
COMPILER_FREEBSD_VERSION=${COMPILER_FREEBSD_VERSION}
.else
CROSSENV+= COMPILER_VERSION=${X_COMPILER_VERSION} \
COMPILER_FEATURES=${X_COMPILER_FEATURES} \
COMPILER_TYPE=${X_COMPILER_TYPE} \
COMPILER_FREEBSD_VERSION=${X_COMPILER_FREEBSD_VERSION}
.endif
.endif
# Store some compiler metadata for use in installworld where we don't
# want to invoke CC at all.
_COMPILER_METADATA_VARS= COMPILER_VERSION \
COMPILER_TYPE \
COMPILER_FEATURES \
COMPILER_FREEBSD_VERSION \
LINKER_VERSION \
LINKER_TYPE
compiler-metadata.mk: .PHONY .META
@: > ${.TARGET}
@echo ".info Using cached compiler metadata from build at $$(hostname) on $$(date)" \
> ${.TARGET}
.for v in ${_COMPILER_METADATA_VARS}
@echo "${v}=${${v}}" >> ${.TARGET}
.endfor
@echo ".export ${_COMPILER_METADATA_VARS}" >> ${.TARGET}
# Handle external binutils.
.if defined(CROSS_TOOLCHAIN_PREFIX)
CROSS_BINUTILS_PREFIX?=${CROSS_TOOLCHAIN_PREFIX}
.endif
# If we do not have a bootstrap binutils (because the in-tree one does not
# support the target architecture), provide a default cross-binutils prefix.
# This allows riscv64 builds, for example, to automatically use the
# riscv64-binutils port or package.
.if !make(showconfig)
.if !empty(BROKEN_OPTIONS:MBINUTILS_BOOTSTRAP) && \
${MK_LLD_BOOTSTRAP} == "no" && \
!defined(CROSS_BINUTILS_PREFIX)
CROSS_BINUTILS_PREFIX=/usr/local/${TARGET_ARCH}-freebsd/bin/
.if !exists(${CROSS_BINUTILS_PREFIX})
.error In-tree binutils does not support the ${TARGET_ARCH} architecture. Install the ${TARGET_ARCH}-binutils port or package or set CROSS_BINUTILS_PREFIX.
.endif
.endif
.endif
XBINUTILS= AS AR LD NM OBJCOPY RANLIB SIZE STRINGS
.for BINUTIL in ${XBINUTILS}
.if defined(CROSS_BINUTILS_PREFIX) && \
exists(${CROSS_BINUTILS_PREFIX}${${BINUTIL}})
X${BINUTIL}?= ${CROSS_BINUTILS_PREFIX}${${BINUTIL}}
.else
X${BINUTIL}?= ${${BINUTIL}}
.endif
.endfor
# We must do lib/ and libexec/ before bin/ in case of a mid-install error to
# keep the user's system reasonably usable. For static->dynamic root upgrades,
# we don't want to install a dynamic binary without rtld and the needed
# libraries. More commonly, for dynamic root, we don't want to install a
# binary that requires a newer library version that hasn't been installed yet.
# This ordering is not a guarantee though.
The only guarantee of a working # system here would require fine-grained ordering of all components based # on their dependencies. .if !empty(SUBDIR_OVERRIDE) SUBDIR= ${SUBDIR_OVERRIDE} .else SUBDIR= lib libexec .if !defined(NO_ROOT) && (make(installworld) || make(install)) # Ensure libraries are installed before progressing. SUBDIR+=.WAIT .endif SUBDIR+=bin .if ${MK_CDDL} != "no" SUBDIR+=cddl .endif SUBDIR+=gnu include .if ${MK_KERBEROS} != "no" SUBDIR+=kerberos5 .endif .if ${MK_RESCUE} != "no" SUBDIR+=rescue .endif SUBDIR+=sbin .if ${MK_CRYPT} != "no" SUBDIR+=secure .endif .if !defined(NO_SHARE) SUBDIR+=share .endif SUBDIR+=sys usr.bin usr.sbin .if ${MK_TESTS} != "no" SUBDIR+= tests .endif .if ${MK_OFED} != "no" SUBDIR+=contrib/ofed .endif # Local directories are last, since it is nice to at least get the base # system rebuilt before you do them. .for _DIR in ${LOCAL_DIRS} .if exists(${.CURDIR}/${_DIR}/Makefile) SUBDIR+= ${_DIR} .endif .endfor # Add LOCAL_LIB_DIRS, but only if they will not be picked up as a SUBDIR # of a LOCAL_DIRS directory. This allows LOCAL_DIRS=foo and # LOCAL_LIB_DIRS=foo/lib to behave as expected. .for _DIR in ${LOCAL_DIRS:M*/} ${LOCAL_DIRS:N*/:S|$|/|} _REDUNDANT_LIB_DIRS+= ${LOCAL_LIB_DIRS:M${_DIR}*} .endfor .for _DIR in ${LOCAL_LIB_DIRS} .if empty(_REDUNDANT_LIB_DIRS:M${_DIR}) && exists(${.CURDIR}/${_DIR}/Makefile) SUBDIR+= ${_DIR} .endif .endfor # We must do etc/ last as it hooks into building the man whatis file # by calling 'makedb' in share/man. This is only relevant for # install/distribute so they build the whatis file after every manpage is # installed. .if make(installworld) || make(install) SUBDIR+=.WAIT .endif SUBDIR+=etc .endif # !empty(SUBDIR_OVERRIDE) .if defined(NOCLEAN) .warning NOCLEAN option is deprecated. Use NO_CLEAN instead. NO_CLEAN= ${NOCLEAN} .endif .if defined(NO_CLEANDIR) CLEANDIR= clean cleandepend .else CLEANDIR= cleandir .endif .if defined(WORLDFAST) NO_CLEAN= t NO_OBJ= t .endif .if ${MK_META_MODE} == "yes" # If filemon is used then we can rely on the build being incremental-safe. # The .meta files will also track the build command and rebuild should # it change. .if empty(.MAKE.MODE:Mnofilemon) NO_CLEAN= t .endif .endif .if defined(NO_OBJ) || ${MK_AUTO_OBJ} == "yes" NO_OBJ= t NO_KERNELOBJ= t .endif .if !defined(NO_OBJ) _obj= obj .endif LOCAL_TOOL_DIRS?= PACKAGEDIR?= ${DESTDIR}/${DISTDIR} .if empty(SHELL:M*csh*) BUILDENV_SHELL?=${SHELL} .else BUILDENV_SHELL?=/bin/sh .endif .if !defined(SVN) || empty(SVN) . for _P in /usr/bin /usr/local/bin . for _S in svn svnlite . if exists(${_P}/${_S}) SVN= ${_P}/${_S} . endif . endfor . endfor .endif SVNFLAGS?= -r HEAD .if !defined(VCS_REVISION) && empty(VCS_REVISION) _VCS_REVISION?= $$(eval ${SVNVERSION_CMD} ${SRCDIR}) . if !empty(_VCS_REVISION) VCS_REVISION= $$(echo r${_VCS_REVISION}) . endif .endif .if !defined(OSRELDATE) .if exists(/usr/include/osreldate.h) OSRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ /usr/include/osreldate.h .else OSRELDATE= 0 .endif .export OSRELDATE .endif # Set VERSION for CTFMERGE to use via the default CTFFLAGS=-L VERSION. 
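# For example (illustrative values): _REVISION=12.0, _BRANCH=CURRENT,
# TARGET_ARCH=amd64 and SRCRELDATE=1200051 expand below to
# VERSION="FreeBSD 12.0-CURRENT amd64 1200051".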
.if !defined(_REVISION) _REVISION!= ${MAKE} -C ${SRCDIR}/release MK_AUTO_OBJ=no -V REVISION .export _REVISION .endif .if !defined(_BRANCH) _BRANCH!= ${MAKE} -C ${SRCDIR}/release MK_AUTO_OBJ=no -V BRANCH .export _BRANCH .endif .if !defined(SRCRELDATE) SRCRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ ${SRCDIR}/sys/sys/param.h .export SRCRELDATE .endif .if !defined(VERSION) VERSION= FreeBSD ${_REVISION}-${_BRANCH:C/-p[0-9]+$//} ${TARGET_ARCH} ${SRCRELDATE} .export VERSION .endif .if !defined(PKG_VERSION) .if ${_BRANCH:MSTABLE*} || ${_BRANCH:MCURRENT*} || ${_BRANCH:MALPHA*} TIMENOW= %Y%m%d%H%M%S EXTRA_REVISION= .s${TIMENOW:gmtime} .endif .if ${_BRANCH:M*-p*} EXTRA_REVISION= _${_BRANCH:C/.*-p([0-9]+$)/\1/} .endif PKG_VERSION= ${_REVISION}${EXTRA_REVISION} .endif KNOWN_ARCHES?= aarch64/arm64 \ amd64 \ arm \ armeb/arm \ armv6/arm \ armv7/arm \ i386 \ mips \ mipsel/mips \ mips64el/mips \ mipsn32el/mips \ mips64/mips \ mipsn32/mips \ mipshf/mips \ mipselhf/mips \ mips64elhf/mips \ mips64hf/mips \ powerpc \ powerpc64/powerpc \ powerpcspe/powerpc \ riscv64/riscv \ riscv64sf/riscv \ sparc64 .if ${TARGET} == ${TARGET_ARCH} _t= ${TARGET} .else _t= ${TARGET_ARCH}/${TARGET} .endif .for _t in ${_t} .if empty(KNOWN_ARCHES:M${_t}) .error Unknown target ${TARGET_ARCH}:${TARGET}. .endif .endfor .if ${TARGET} == ${MACHINE} TARGET_CPUTYPE?=${CPUTYPE} .else TARGET_CPUTYPE?= .endif .if !empty(TARGET_CPUTYPE) _TARGET_CPUTYPE=${TARGET_CPUTYPE} .else _TARGET_CPUTYPE=dummy .endif _CPUTYPE!= MAKEFLAGS= CPUTYPE=${_TARGET_CPUTYPE} ${MAKE} -f /dev/null \ -m ${.CURDIR}/share/mk MK_AUTO_OBJ=no -V CPUTYPE .if ${_CPUTYPE} != ${_TARGET_CPUTYPE} .error CPUTYPE global should be set with ?=. .endif .if make(buildworld) BUILD_ARCH!= uname -p .if ${MACHINE_ARCH} != ${BUILD_ARCH} .error To cross-build, set TARGET_ARCH. .endif .endif WORLDTMP= ${OBJTREE}${.CURDIR}/tmp BPATH= ${CCACHE_WRAPPER_PATH_PFX}${WORLDTMP}/legacy/usr/sbin:${WORLDTMP}/legacy/usr/bin:${WORLDTMP}/legacy/bin XPATH= ${WORLDTMP}/usr/sbin:${WORLDTMP}/usr/bin STRICTTMPPATH= ${BPATH}:${XPATH} TMPPATH= ${STRICTTMPPATH}:${PATH} # # Avoid running mktemp(1) unless actually needed. # It may not be functional, e.g., due to new ABI # when in the middle of installing over this system. # .if make(distributeworld) || make(installworld) || make(stageworld) INSTALLTMP!= /usr/bin/mktemp -d -u -t install .endif .if make(stagekernel) || make(distributekernel) TAGS+= kernel PACKAGE= kernel .endif # # Building a world goes through the following stages # # 1. legacy stage [BMAKE] # This stage is responsible for creating compatibility # shims that are needed by the bootstrap-tools, # build-tools and cross-tools stages. These are generally # APIs that tools from one of those three stages need to # build that aren't present on the host. # 1. bootstrap-tools stage [BMAKE] # This stage is responsible for creating programs that # are needed for backward compatibility reasons. They # are not built as cross-tools. # 2. build-tools stage [TMAKE] # This stage is responsible for creating the object # tree and building any tools that are needed during # the build process. Some programs are listed during # this phase because they build binaries to generate # files needed to build these programs. This stage also # builds the 'build-tools' target rather than 'all'. # 3. cross-tools stage [XMAKE] # This stage is responsible for creating any tools that # are needed for building the system. A cross-compiler is one # of them. This differs from build tools in two ways: # 1. 
the 'all' target is built rather than 'build-tools' # 2. these tools are installed into TMPPATH for stage 4. # 4. world stage [WMAKE] # This stage actually builds the world. # 5. install stage (optional) [IMAKE] # This stage installs a previously built world. # BOOTSTRAPPING?= 0 # Keep these in sync -- see below for special case exception MINIMUM_SUPPORTED_OSREL?= 900044 MINIMUM_SUPPORTED_REL?= 9.1 # Common environment for world related stages CROSSENV+= MAKEOBJDIRPREFIX=${OBJTREE} \ MACHINE_ARCH=${TARGET_ARCH} \ MACHINE=${TARGET} \ CPUTYPE=${TARGET_CPUTYPE} .if ${MK_META_MODE} != "no" # Don't rebuild build-tools targets during normal build. CROSSENV+= BUILD_TOOLS_META=.NOMETA .endif .if defined(TARGET_CFLAGS) CROSSENV+= ${TARGET_CFLAGS} .endif # bootstrap-tools stage BMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \ TOOLS_PREFIX=${WORLDTMP} \ PATH=${BPATH}:${PATH} \ WORLDTMP=${WORLDTMP} \ MAKEFLAGS="-m ${.CURDIR}/tools/build/mk ${.MAKEFLAGS}" # need to keep this in sync with targets/pseudo/bootstrap-tools/Makefile BSARGS= DESTDIR= \ BOOTSTRAPPING=${OSRELDATE} \ BWPHASE=${.TARGET:C,^_,,} \ SSP_CFLAGS= \ MK_COVERAGE=no MK_HTML=no NO_LINT=yes MK_MAN=no \ -DNO_PIC MK_PROFILE=no -DNO_SHARED \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no \ MK_CLANG_EXTRAS=no MK_CLANG_FULL=no \ MK_LLDB=no MK_TESTS=no \ MK_INCLUDES=yes BMAKE= MAKEOBJDIRPREFIX=${WORLDTMP} \ ${BMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ ${BSARGS} # build-tools stage TMAKE= MAKEOBJDIRPREFIX=${OBJTREE} \ ${BMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ DESTDIR= \ BOOTSTRAPPING=${OSRELDATE} \ BWPHASE=${.TARGET:C,^_,,} \ SSP_CFLAGS= \ -DNO_LINT \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no \ MK_CLANG_EXTRAS=no MK_CLANG_FULL=no \ MK_COVERAGE=no \ MK_LLDB=no MK_TESTS=no # cross-tools stage XMAKE= TOOLS_PREFIX=${WORLDTMP} ${BMAKE} \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ MK_GDB=no MK_LLD_IS_LD=${MK_LLD_BOOTSTRAP} MK_TESTS=no # kernel-tools stage KTMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${BPATH}:${PATH} \ WORLDTMP=${WORLDTMP} KTMAKE= TOOLS_PREFIX=${WORLDTMP} MAKEOBJDIRPREFIX=${WORLDTMP} \ ${KTMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ DESTDIR= \ BOOTSTRAPPING=${OSRELDATE} \ SSP_CFLAGS= \ MK_COVERAGE=no MK_HTML=no -DNO_LINT MK_MAN=no \ -DNO_PIC MK_PROFILE=no -DNO_SHARED \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no # world stage WMAKEENV= ${CROSSENV} \ INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${TMPPATH} \ SYSROOT=${WORLDTMP} # make hierarchy HMAKE= PATH=${TMPPATH} ${MAKE} LOCAL_MTREE=${LOCAL_MTREE:Q} .if defined(NO_ROOT) HMAKE+= PATH=${TMPPATH} METALOG=${METALOG} -DNO_ROOT .endif CROSSENV+= CC="${XCC} ${XCFLAGS}" CXX="${XCXX} ${XCXXFLAGS} ${XCFLAGS}" \ CPP="${XCPP} ${XCFLAGS}" \ AS="${XAS}" AR="${XAR}" LD="${XLD}" LLVM_LINK="${XLLVM_LINK}" \ NM=${XNM} OBJCOPY="${XOBJCOPY}" \ RANLIB=${XRANLIB} STRINGS=${XSTRINGS} \ SIZE="${XSIZE}" .if defined(CROSS_BINUTILS_PREFIX) && exists(${CROSS_BINUTILS_PREFIX}) # In the case of xdev-build tools, CROSS_BINUTILS_PREFIX won't be a # directory, but the compiler will look in the right place for its # tools so we don't need to tell it where to look. BFLAGS+= -B${CROSS_BINUTILS_PREFIX} .endif # The internal bootstrap compiler has a default sysroot set by TOOLS_PREFIX # and target set by TARGET/TARGET_ARCH. However, there are several needs to # always pass an explicit --sysroot and -target. # - External compiler needs sysroot and target flags. # - External ld needs sysroot. 
# - To be clear about the use of a sysroot when using the internal compiler. # - Easier debugging. # - Allowing WITH_SYSTEM_COMPILER+WITH_META_MODE to work together due to # the flip-flopping build command when sometimes using external and # sometimes using internal. # - Allow using lld which has no support for default paths. .if !defined(CROSS_BINUTILS_PREFIX) || !exists(${CROSS_BINUTILS_PREFIX}) BFLAGS+= -B${WORLDTMP}/usr/bin .endif .if ${TARGET} == "arm" .if ${TARGET_ARCH:Marmv[67]*} != "" && ${TARGET_CPUTYPE:M*soft*} == "" TARGET_ABI= gnueabihf .else TARGET_ABI= gnueabi .endif .endif .if ${WANT_COMPILER_TYPE} == gcc || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc) # GCC requires -isystem and -L when using a cross-compiler. --sysroot # won't set header path and -L is used to ensure the base library path # is added before the port PREFIX library path. XCFLAGS+= -isystem ${WORLDTMP}/usr/include -L${WORLDTMP}/usr/lib # GCC requires -B to find /usr/lib/crti.o when using a cross-compiler # combined with --sysroot. XCFLAGS+= -B${WORLDTMP}/usr/lib # Force using libc++ for external GCC. .if ${X_COMPILER_TYPE} == gcc && ${X_COMPILER_VERSION} >= 40800 XCXXFLAGS+= -isystem ${WORLDTMP}/usr/include/c++/v1 -std=c++11 \ -nostdinc++ .endif .elif ${WANT_COMPILER_TYPE} == clang || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == clang) MACHINE_ABI?= unknown MACHINE_TRIPLE?=${MACHINE_ARCH:C/amd64/x86_64/}-${MACHINE_ABI}-freebsd12.0 TARGET_ABI?= unknown TARGET_TRIPLE?= ${TARGET_ARCH:C/amd64/x86_64/}-${TARGET_ABI}-freebsd12.0 XCFLAGS+= -target ${TARGET_TRIPLE} .endif XCFLAGS+= --sysroot=${WORLDTMP} .if !empty(BFLAGS) XCFLAGS+= ${BFLAGS} .endif .if ${MK_LIB32} != "no" && (${TARGET_ARCH} == "amd64" || \ ${TARGET_ARCH} == "powerpc64" || ${TARGET_ARCH:Mmips64*} != "") LIBCOMPAT= 32 .include "Makefile.libcompat" .elif ${MK_LIBSOFT} != "no" && ${TARGET_ARCH:Marmv[67]*} != "" LIBCOMPAT= SOFT .include "Makefile.libcompat" .endif # META_MODE normally ignores host file changes since every build updates # timestamps (see NO_META_IGNORE_HOST in sys.mk). There are known times # when the ABI breaks though that we want to force rebuilding WORLDTMP # to get updated host tools. .if ${MK_META_MODE} == "yes" && defined(NO_CLEAN) && \ !defined(NO_META_IGNORE_HOST) && !defined(NO_META_IGNORE_HOST_HEADERS) && \ !make(showconfig) # r318736 - ino64 major ABI breakage META_MODE_BAD_ABI_VERS+= 1200031 .if !defined(OBJDIR_HOST_OSRELDATE) .if exists(${OBJTREE}${.CURDIR}/host-osreldate.h) OBJDIR_HOST_OSRELDATE!= \ awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ ${OBJTREE}${.CURDIR}/host-osreldate.h .elif exists(${WORLDTMP}/usr/include/osreldate.h) OBJDIR_HOST_OSRELDATE= 0 .endif .export OBJDIR_HOST_OSRELDATE .endif # Note that this logic is the opposite of normal BOOTSTRAP handling. We want # to compare the WORLDTMP's OSRELDATE to the host's OSRELDATE. If the WORLDTMP # is older than the ABI-breakage OSRELDATE of the HOST then we rebuild. .if defined(OBJDIR_HOST_OSRELDATE) .for _ver in ${META_MODE_BAD_ABI_VERS} .if ${OSRELDATE} >= ${_ver} && ${OBJDIR_HOST_OSRELDATE} < ${_ver} _meta_mode_need_rebuild= ${_ver} .endif .endfor .if defined(_meta_mode_need_rebuild) .info META_MODE: Rebuilding host tools due to ABI breakage in __FreeBSD_version ${_meta_mode_need_rebuild}. NO_META_IGNORE_HOST_HEADERS= 1 .export NO_META_IGNORE_HOST_HEADERS .endif # defined(_meta_mode_need_rebuild) .endif # defined(OBJDIR_HOST_OSRELDATE) .endif # ${MK_META_MODE} == "yes" && defined(NO_CLEAN) ... 
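# Worked example of the check above (numbers illustrative): a host already
# past the ino64 break (OSRELDATE >= 1200031) whose object tree still records
# OBJDIR_HOST_OSRELDATE=1200005 satisfies the comparison, so
# _meta_mode_need_rebuild is set and NO_META_IGNORE_HOST_HEADERS forces the
# WORLDTMP host tools to be rebuilt.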
# This is only used for META_MODE+filemon to track what the oldest
# __FreeBSD_version is in WORLDTMP. This purposely does NOT have
# a make dependency on /usr/include/osreldate.h as the file should
# only be copied when it is missing or meta mode determines it has changed.
# Since host files are normally ignored without NO_META_IGNORE_HOST
# the file will never be updated unless that flag is specified. This
# allows tracking the oldest osreldate to force rebuilds via
# META_MODE_BAD_ABI_VERS above.
host-osreldate.h: # DO NOT ADD /usr/include/osreldate.h here
@cp -f /usr/include/osreldate.h ${.TARGET}
WMAKE= ${WMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \
BWPHASE=${.TARGET:C,^_,,} \
DESTDIR=${WORLDTMP}
IMAKEENV= ${CROSSENV}
IMAKE= ${IMAKEENV} ${MAKE} -f Makefile.inc1 \
${IMAKE_INSTALL} ${IMAKE_MTREE}
.if empty(.MAKEFLAGS:M-n)
IMAKEENV+= PATH=${STRICTTMPPATH}:${INSTALLTMP} \
LD_LIBRARY_PATH=${INSTALLTMP} \
PATH_LOCALE=${INSTALLTMP}/locale
IMAKE+= __MAKE_SHELL=${INSTALLTMP}/sh
.else
IMAKEENV+= PATH=${TMPPATH}:${INSTALLTMP}
.endif
.if defined(DB_FROM_SRC)
INSTALLFLAGS+= -N ${.CURDIR}/etc
MTREEFLAGS+= -N ${.CURDIR}/etc
.endif
_INSTALL_DDIR= ${DESTDIR}/${DISTDIR}
INSTALL_DDIR= ${_INSTALL_DDIR:S://:/:g:C:/$::}
.if defined(NO_ROOT)
METALOG?= ${DESTDIR}/${DISTDIR}/METALOG
METALOG:= ${METALOG:C,//+,/,g}
IMAKE+= -DNO_ROOT METALOG=${METALOG}
INSTALLFLAGS+= -U -M ${METALOG} -D ${INSTALL_DDIR}
MTREEFLAGS+= -W
.endif
.if defined(BUILD_PKGS)
INSTALLFLAGS+= -h sha256
.endif
.if defined(DB_FROM_SRC) || defined(NO_ROOT)
IMAKE_INSTALL= INSTALL="install ${INSTALLFLAGS}"
IMAKE_MTREE= MTREE_CMD="mtree ${MTREEFLAGS}"
.endif
# kernel stage
KMAKEENV= ${WMAKEENV:NSYSROOT=*}
KMAKE= ${KMAKEENV} ${MAKE} ${.MAKEFLAGS} ${KERNEL_FLAGS} KERNEL=${INSTKERNNAME}
#
# buildworld
#
# Attempt to rebuild the entire system, with reasonable chance of
# success, regardless of how old your existing system is.
#
_sanity_check: .PHONY .MAKE
.if ${.CURDIR:C/[^,]//g} != ""
# The m4 build of sendmail files doesn't like it if ',' is used
# anywhere in the path of its files.
@echo
@echo "*** Error: path to source tree contains a comma ','"
@echo
@false
.elif ${.CURDIR:M*\:*} != ""
# Using ':' leaks into PATH and breaks finding cross-tools.
@echo
@echo "*** Error: path to source tree contains a colon ':'"
@echo
@false
.endif
# Our current approach to dependency tracking cannot cope with certain source
# tree changes, particularly with respect to removing source files and
# replacing generated files. Handle these cases here in an ad-hoc fashion.
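# As a concrete illustration (one of the cases handled below): after r320278
# rewrote the fstat stub in C, a stale lib/libc/.depend.fstat.o could still
# reference fstat.S, an assembly source that is no longer generated, wedging
# the incremental build; such .depend files are detected with egrep and
# removed by hand.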
_cleanobj_fast_depend_hack: .PHONY # Syscall stubs rewritten in C # Date SVN Rev Syscalls # 20160829 r305012 ptrace # 20170624 r320278 fstat fstatat fstatfs getdirentries getfsstat statfs .for f in fstat fstatat fstatfs getdirentries getfsstat ptrace statfs .if exists(${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.o) @if egrep -qw '${f}\.[sS]' \ ${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.o; then \ echo Removing stale dependencies for ${f} syscall wrappers; \ rm -f ${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.* \ ${LIBCOMPAT:D${LIBCOMPAT_OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.*}; \ fi .endif .endfor # 20170607 remove stale dependencies for utimens* wrappers removed in r319663 .for f in futimens utimensat .if exists(${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.o) @if egrep -q '/${f}.c' \ ${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.o; then \ echo Removing stale dependencies for ${f} syscall wrappers; \ rm -f ${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.* \ ${LIBCOMPAT:D${LIBCOMPAT_OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.*}; \ fi .endif .endfor # 20170523 remove stale generated asm files for functions which are no longer # syscalls after r302092 (pipe) and r318736 (others) .for f in getdents lstat mknod pipe stat .if exists(${OBJTREE}${.CURDIR}/lib/libc/${f}.s) || \ exists(${OBJTREE}${.CURDIR}/lib/libc/${f}.S) @echo Removing stale generated ${f} syscall files @rm -f ${OBJTREE}${.CURDIR}/lib/libc/${f}.* \ ${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.* \ ${LIBCOMPAT:D${LIBCOMPAT_OBJTREE}${.CURDIR}/lib/libc/${f}.*} \ ${LIBCOMPAT:D${LIBCOMPAT_OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.*} .endif .endfor _worldtmp: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> Rebuilding the temporary build tree" @echo "--------------------------------------------------------------" .if !defined(NO_CLEAN) rm -rf ${WORLDTMP} .else .if exists(${WORLDTMP}) @echo ">>> Deleting stale files in build tree..." 
${_+_}cd ${.CURDIR}; ${WMAKE} -DBATCH_DELETE_OLD_FILES \ delete-old delete-old-libs >/dev/null .endif rm -rf ${WORLDTMP}/legacy/usr/include .if ${USING_SYSTEM_COMPILER} == "yes" .for cc in cc c++ if [ -x ${WORLDTMP}/usr/bin/${cc} ]; then \ inum=$$(stat -f %i ${WORLDTMP}/usr/bin/${cc}); \ find ${WORLDTMP}/usr/bin -inum $${inum} -delete; \ fi .endfor .endif # ${USING_SYSTEM_COMPILER} == "yes" .endif # !defined(NO_CLEAN) .for _dir in \ lib lib/casper usr legacy/bin legacy/usr mkdir -p ${WORLDTMP}/${_dir} .endfor mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${WORLDTMP}/legacy/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${WORLDTMP}/legacy/usr/include >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${WORLDTMP}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${WORLDTMP}/usr/include >/dev/null ln -sf ${.CURDIR}/sys ${WORLDTMP} .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${WORLDTMP}/legacy/usr/lib >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${WORLDTMP}/usr/lib >/dev/null .endif .for _mtree in ${LOCAL_MTREE} mtree -deU -f ${.CURDIR}/${_mtree} -p ${WORLDTMP} > /dev/null .endfor _legacy: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1.1: legacy release compatibility shims" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${BMAKE} legacy _bootstrap-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1.2: bootstrap tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${BMAKE} bootstrap-tools _cleanobj: .if !defined(NO_CLEAN) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.1: cleaning up the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} ${CLEANDIR} .if defined(LIBCOMPAT) ${_+_}cd ${.CURDIR}; ${LIBCOMPATWMAKE} -f Makefile.inc1 ${CLEANDIR} .endif .else ${_+_}cd ${.CURDIR}; ${WMAKE} _cleanobj_fast_depend_hack .endif # !defined(NO_CLEAN) _obj: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.2: rebuilding the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} obj _build-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.3: build tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${TMAKE} build-tools _cross-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3: cross tools" @echo "--------------------------------------------------------------" @rm -f ${OBJTREE}${.CURDIR}/compiler-metadata.mk ${_+_}cd ${.CURDIR}; ${XMAKE} cross-tools ${_+_}cd ${.CURDIR}; ${XMAKE} kernel-tools _build-metadata: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3.1: recording build metadata" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} compiler-metadata.mk ${_+_}cd ${.CURDIR}; ${WMAKE} host-osreldate.h _includes: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.1: building includes" @echo "--------------------------------------------------------------" # Special handling for SUBDIR_OVERRIDE in 
buildworld as they most likely need # headers from default SUBDIR. Do SUBDIR_OVERRIDE includes last. ${_+_}cd ${.CURDIR}; ${WMAKE} SUBDIR_OVERRIDE= SHARED=symlinks \ MK_INCLUDES=yes includes .if !empty(SUBDIR_OVERRIDE) && make(buildworld) ${_+_}cd ${.CURDIR}; ${WMAKE} MK_INCLUDES=yes SHARED=symlinks includes .endif _libraries: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.2: building libraries" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; \ ${WMAKE} -DNO_FSCHG MK_HTML=no -DNO_LINT MK_MAN=no \ MK_PROFILE=no MK_TESTS=no MK_TESTS_SUPPORT=${MK_TESTS} libraries everything: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.3: building everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; _PARALLEL_SUBDIR_OK=1 ${WMAKE} all WMAKE_TGTS= .if !defined(WORLDFAST) WMAKE_TGTS+= _sanity_check _worldtmp _legacy .if empty(SUBDIR_OVERRIDE) WMAKE_TGTS+= _bootstrap-tools .endif WMAKE_TGTS+= _cleanobj .if !defined(NO_OBJ) WMAKE_TGTS+= _obj .endif WMAKE_TGTS+= _build-tools _cross-tools WMAKE_TGTS+= _build-metadata WMAKE_TGTS+= _includes .endif .if !defined(NO_LIBS) WMAKE_TGTS+= _libraries .endif WMAKE_TGTS+= everything .if defined(LIBCOMPAT) && empty(SUBDIR_OVERRIDE) WMAKE_TGTS+= build${libcompat} .endif buildworld: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue .PHONY .ORDER: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue buildworld_prologue: .PHONY @echo "--------------------------------------------------------------" @echo ">>> World build started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" buildworld_epilogue: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> World build completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" # # We need to have this as a target because the indirection between Makefile # and Makefile.inc1 causes the correct PATH to be used, rather than a # modification of the current environment's PATH. In addition, we need # to quote multiword values. # buildenvvars: .PHONY @echo ${WMAKEENV:Q} ${.MAKE.EXPORTED:@v@$v=\"${$v}\"@} .if ${.TARGETS:Mbuildenv} .if ${.MAKEFLAGS:M-j} .error The buildenv target is incompatible with -j .endif .endif BUILDENV_DIR?= ${.CURDIR} buildenv: .PHONY @echo Entering world for ${TARGET_ARCH}:${TARGET} .if ${BUILDENV_SHELL:M*zsh*} @echo For ZSH you must run: export CPUTYPE=${TARGET_CPUTYPE} .endif @cd ${BUILDENV_DIR} && env ${WMAKEENV} BUILDENV=1 ${BUILDENV_SHELL} TOOLCHAIN_TGTS= ${WMAKE_TGTS:Neverything:Nbuild${libcompat}} toolchain: ${TOOLCHAIN_TGTS} .PHONY kernel-toolchain: ${TOOLCHAIN_TGTS:N_includes:N_libraries} .PHONY # # installcheck # # Checks to be sure system is ready for installworld/installkernel. # installcheck: _installcheck_world _installcheck_kernel .PHONY _installcheck_world: .PHONY _installcheck_kernel: .PHONY # # Require DESTDIR to be set if installing for a different architecture or # using the user/group database in the source tree. 
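# For example, a cross install such as
#   make installworld TARGET=arm64 TARGET_ARCH=aarch64 DESTDIR=/mnt
# passes the check below, while the same command without DESTDIR aborts
# with "ERROR: Please set DESTDIR!".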
# .if ${TARGET_ARCH} != ${MACHINE_ARCH} || ${TARGET} != ${MACHINE} || \ defined(DB_FROM_SRC) .if !make(distributeworld) _installcheck_world: __installcheck_DESTDIR _installcheck_kernel: __installcheck_DESTDIR __installcheck_DESTDIR: .PHONY .if !defined(DESTDIR) || empty(DESTDIR) @echo "ERROR: Please set DESTDIR!"; \ false .endif .endif .endif .if !defined(DB_FROM_SRC) # # Check for missing UIDs/GIDs. # CHECK_UIDS= auditdistd CHECK_GIDS= audit .if ${MK_SENDMAIL} != "no" CHECK_UIDS+= smmsp CHECK_GIDS+= smmsp .endif .if ${MK_PF} != "no" CHECK_UIDS+= proxy CHECK_GIDS+= proxy authpf .endif .if ${MK_UNBOUND} != "no" CHECK_UIDS+= unbound CHECK_GIDS+= unbound .endif _installcheck_world: __installcheck_UGID __installcheck_UGID: .PHONY .for uid in ${CHECK_UIDS} @if ! `id -u ${uid} >/dev/null 2>&1`; then \ echo "ERROR: Required ${uid} user is missing, see /usr/src/UPDATING."; \ false; \ fi .endfor .for gid in ${CHECK_GIDS} @if ! `find / -prune -group ${gid} >/dev/null 2>&1`; then \ echo "ERROR: Required ${gid} group is missing, see /usr/src/UPDATING."; \ false; \ fi .endfor .endif # # If installing over the running system (DESTDIR is / or unset) and the install # includes rescue, try running rescue from the objdir as a sanity check. If # rescue is not functional (e.g., because it depends on a system call not # supported by the currently running kernel), abort the installation. # .if !make(distributeworld) && ${MK_RESCUE} != "no" && \ (empty(DESTDIR) || ${DESTDIR} == "/") && empty(BYPASS_INSTALLCHECK_SH) _installcheck_world: __installcheck_sh_check __installcheck_sh_check: .PHONY @if [ "`${OBJTREE}${.CURDIR}/rescue/rescue/rescue sh -c 'echo OK'`" != \ OK ]; then \ echo "rescue/sh check failed, installation aborted" >&2; \ false; \ fi .endif # # Required install tools to be saved in a scratch dir for safety. # .if ${MK_ZONEINFO} != "no" _zoneinfo= zic tzsetup .endif ITOOLS= [ awk cap_mkdb cat chflags chmod chown cmp cp \ date echo egrep find grep id install ${_install-info} \ ln make mkdir mtree mv pwd_mkdb \ rm sed services_mkdb sh strip sysctl test true uname wc ${_zoneinfo} \ ${LOCAL_ITOOLS} # Needed for share/man .if ${MK_MAN_UTILS} != "no" ITOOLS+=makewhatis .endif # # distributeworld # # Distributes everything compiled by a `buildworld'. # # installworld # # Installs everything compiled by a 'buildworld'. # # Non-base distributions produced by the base system EXTRA_DISTRIBUTIONS= doc .if defined(LIBCOMPAT) EXTRA_DISTRIBUTIONS+= lib${libcompat} .endif .if ${MK_TESTS} != "no" EXTRA_DISTRIBUTIONS+= tests .endif DEBUG_DISTRIBUTIONS= .if ${MK_DEBUG_FILES} != "no" DEBUG_DISTRIBUTIONS+= base ${EXTRA_DISTRIBUTIONS:S,doc,,:S,tests,,} .endif MTREE_MAGIC?= mtree 2.0 distributeworld installworld stageworld: _installcheck_world .PHONY mkdir -p ${INSTALLTMP} progs=$$(for prog in ${ITOOLS}; do \ if progpath=`which $$prog`; then \ echo $$progpath; \ else \ echo "Required tool $$prog not found in PATH." >&2; \ exit 1; \ fi; \ done); \ libs=$$(ldd -f "%o %p\n" -f "%o %p\n" $$progs 2>/dev/null | sort -u | \ while read line; do \ set -- $$line; \ if [ "$$2 $$3" != "not found" ]; then \ echo $$2; \ else \ echo "Required library $$1 not found." 
>&2; \ exit 1; \ fi; \ done); \ cp $$libs $$progs ${INSTALLTMP} cp -R $${PATH_LOCALE:-"/usr/share/locale"} ${INSTALLTMP}/locale .if defined(NO_ROOT) -mkdir -p ${METALOG:H} echo "#${MTREE_MAGIC}" > ${METALOG} .endif .if make(distributeworld) .for dist in ${EXTRA_DISTRIBUTIONS} -mkdir ${DESTDIR}/${DISTDIR}/${dist} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.root.dist \ -p ${DESTDIR}/${DISTDIR}/${dist} >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/include >/dev/null .if ${MK_COVERAGE} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.cov.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib >/dev/null .endif .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib >/dev/null .endif .if defined(LIBCOMPAT) mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib/debug/usr >/dev/null .endif .endif .if ${MK_TESTS} != "no" && ${dist} == "tests" -mkdir -p ${DESTDIR}/${DISTDIR}/${dist}${TESTSBASE} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}${TESTSBASE} >/dev/null .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib/debug/${TESTSBASE} >/dev/null .endif .endif .if defined(NO_ROOT) ${IMAKEENV} mtree -C -f ${.CURDIR}/etc/mtree/BSD.root.dist | \ sed -e 's#^\./#./${dist}/#' >> ${METALOG} ${IMAKEENV} mtree -C -f ${.CURDIR}/etc/mtree/BSD.usr.dist | \ sed -e 's#^\./#./${dist}/usr/#' >> ${METALOG} ${IMAKEENV} mtree -C -f ${.CURDIR}/etc/mtree/BSD.include.dist | \ sed -e 's#^\./#./${dist}/usr/include/#' >> ${METALOG} .if defined(LIBCOMPAT) ${IMAKEENV} mtree -C -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist | \ sed -e 's#^\./#./${dist}/usr/#' >> ${METALOG} .endif .endif .endfor -mkdir ${DESTDIR}/${DISTDIR}/base ${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \ METALOG=${METALOG} ${IMAKE_INSTALL} ${IMAKE_MTREE} \ DISTBASE=/base DESTDIR=${DESTDIR}/${DISTDIR}/base \ LOCAL_MTREE=${LOCAL_MTREE:Q} distrib-dirs .endif ${_+_}cd ${.CURDIR}; ${IMAKE} re${.TARGET:S/world$//}; \ ${IMAKEENV} rm -rf ${INSTALLTMP} .if make(distributeworld) .for dist in ${EXTRA_DISTRIBUTIONS} find ${DESTDIR}/${DISTDIR}/${dist} -mindepth 1 -type d -empty -delete .endfor .if defined(NO_ROOT) .for dist in base ${EXTRA_DISTRIBUTIONS} @# For each file that exists in this dist, print the corresponding @# line from the METALOG. This relies on the fact that @# a line containing only the filename will sort immediately before @# the relevant mtree line. cd ${DESTDIR}/${DISTDIR}; \ find ./${dist} | sort -u ${METALOG} - | \ awk 'BEGIN { print "#${MTREE_MAGIC}" } !/ type=/ { file = $$1 } / type=/ { if ($$1 == file) { sub(/^\.\/${dist}\//, "./"); print } }' > \ ${DESTDIR}/${DISTDIR}/${dist}.meta .endfor .for dist in ${DEBUG_DISTRIBUTIONS} @# For each file that exists in this dist, print the corresponding @# line from the METALOG. This relies on the fact that @# a line containing only the filename will sort immediately before @# the relevant mtree line. 
cd ${DESTDIR}/${DISTDIR}; \
find ./${dist}/usr/lib/debug | sort -u ${METALOG} - | \
awk 'BEGIN { print "#${MTREE_MAGIC}" } !/ type=/ { file = $$1 } / type=/ { if ($$1 == file) { sub(/^\.\/${dist}\//, "./"); print } }' > \
${DESTDIR}/${DISTDIR}/${dist}.debug.meta
.endfor
.endif
.endif
packageworld: .PHONY
.for dist in base ${EXTRA_DISTRIBUTIONS}
.if defined(NO_ROOT)
${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \
tar cvf - --exclude usr/lib/debug \
@${DESTDIR}/${DISTDIR}/${dist}.meta | \
${XZ_CMD} > ${PACKAGEDIR}/${dist}.txz
.else
${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \
tar cvf - --exclude usr/lib/debug . | \
${XZ_CMD} > ${PACKAGEDIR}/${dist}.txz
.endif
.endfor
.for dist in ${DEBUG_DISTRIBUTIONS}
. if defined(NO_ROOT)
${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \
tar cvf - @${DESTDIR}/${DISTDIR}/${dist}.debug.meta | \
${XZ_CMD} > ${PACKAGEDIR}/${dist}-dbg.txz
. else
${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \
tar cvLf - usr/lib/debug | \
${XZ_CMD} > ${PACKAGEDIR}/${dist}-dbg.txz
. endif
.endfor
#
# reinstall
#
# If you have a build server, you can NFS mount the source and obj directories
# and do a 'make reinstall' on the *client* to install new binaries from the
# most recent server build.
#
restage reinstall: .MAKE .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> Making hierarchy"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 \
LOCAL_MTREE=${LOCAL_MTREE:Q} hierarchy
.if make(restage)
@echo "--------------------------------------------------------------"
@echo ">>> Making distribution"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 \
LOCAL_MTREE=${LOCAL_MTREE:Q} distribution
.endif
@echo
@echo "--------------------------------------------------------------"
@echo ">>> Installing everything"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install
.if defined(LIBCOMPAT)
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install${libcompat}
.endif
redistribute: .MAKE .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> Distributing everything"
@echo "--------------------------------------------------------------"
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute
.if defined(LIBCOMPAT)
${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute${libcompat} \
DISTRIBUTION=lib${libcompat}
.endif
distrib-dirs distribution: .MAKE .PHONY
${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \
${IMAKE_INSTALL} ${IMAKE_MTREE} METALOG=${METALOG} ${.TARGET}
.if make(distribution)
${_+_}cd ${.CURDIR}; ${CROSSENV} PATH=${TMPPATH} \
${MAKE} -f Makefile.inc1 ${IMAKE_INSTALL} \
METALOG=${METALOG} MK_TESTS=no installconfig
.endif
#
# buildkernel and installkernel
#
# Which kernels to build and/or install is specified by setting
# KERNCONF. If not defined, a GENERIC kernel is built/installed.
# Only the existing (depending on TARGET) config files are used
# for building kernels and only the first of these is designated
# as the one being installed.
#
# Note that we have to use TARGET instead of TARGET_ARCH when
# we're in kernel-land. Since only TARGET_ARCH is (expected to be)
# set to cross-build, we have to make sure TARGET is set
# properly.
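# For example, "make buildkernel KERNCONF='MYKERNEL GENERIC'" (MYKERNEL being
# a hypothetical config in sys/${TARGET}/conf) builds both kernels, and
# installkernel later installs MYKERNEL, the first listed config that exists.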
.if defined(KERNFAST) NO_KERNELCLEAN= t NO_KERNELCONFIG= t NO_KERNELOBJ= t # Shortcut for KERNCONF=Blah -DKERNFAST is now KERNFAST=Blah .if !defined(KERNCONF) && ${KERNFAST} != "1" KERNCONF=${KERNFAST} .endif .endif .if ${TARGET_ARCH} == "powerpc64" KERNCONF?= GENERIC64 .else KERNCONF?= GENERIC .endif INSTKERNNAME?= kernel KERNSRCDIR?= ${.CURDIR}/sys KRNLCONFDIR= ${KERNSRCDIR}/${TARGET}/conf KRNLOBJDIR= ${OBJTREE}${KERNSRCDIR} KERNCONFDIR?= ${KRNLCONFDIR} BUILDKERNELS= INSTALLKERNEL= .if defined(NO_INSTALLKERNEL) # All of the BUILDKERNELS loops start at index 1. BUILDKERNELS+= dummy .endif .for _kernel in ${KERNCONF} .if exists(${KERNCONFDIR}/${_kernel}) BUILDKERNELS+= ${_kernel} .if empty(INSTALLKERNEL) && !defined(NO_INSTALLKERNEL) INSTALLKERNEL= ${_kernel} .endif .else .if make(buildkernel) .error Missing KERNCONF ${KERNCONFDIR}/${_kernel} .endif .endif .endfor ${WMAKE_TGTS:N_worldtmp:Nbuild${libcompat}} ${.ALLTARGETS:M_*:N_worldtmp}: .MAKE .PHONY # # buildkernel # # Builds all kernels defined by BUILDKERNELS. # buildkernel: .MAKE .PHONY .if empty(BUILDKERNELS:Ndummy) @echo "ERROR: Missing kernel configuration file(s) (${KERNCONF})."; \ false .endif @echo .for _kernel in ${BUILDKERNELS:Ndummy} @echo "--------------------------------------------------------------" @echo ">>> Kernel build for ${_kernel} started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" @echo "===> ${_kernel}" mkdir -p ${KRNLOBJDIR} .if !defined(NO_KERNELCONFIG) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1: configuring the kernel" @echo "--------------------------------------------------------------" cd ${KRNLCONFDIR}; \ PATH=${TMPPATH} \ config ${CONFIGARGS} -d ${KRNLOBJDIR}/${_kernel} \ -I '${KERNCONFDIR}' '${KERNCONFDIR}/${_kernel}' .endif .if !defined(NO_CLEAN) && !defined(NO_KERNELCLEAN) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.1: cleaning up the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} ${CLEANDIR} .endif .if !defined(NO_KERNELOBJ) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.2: rebuilding the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} obj .endif @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.3: build tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${KTMAKE} kernel-tools @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3.1: building everything" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} all -DNO_MODULES_OBJ @echo "--------------------------------------------------------------" @echo ">>> Kernel build for ${_kernel} completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" .endfor NO_INSTALLEXTRAKERNELS?= yes # # installkernel, etc. 
# # Install the kernel defined by INSTALLKERNEL # installkernel installkernel.debug \ reinstallkernel reinstallkernel.debug: _installcheck_kernel .PHONY .if !defined(NO_INSTALLKERNEL) .if empty(INSTALLKERNEL) @echo "ERROR: No kernel \"${KERNCONF}\" to install."; \ false .endif @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${INSTALLKERNEL}" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \ ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} ${IMAKE_INSTALL} KERNEL=${INSTKERNNAME} ${.TARGET:S/kernel//} .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${_kernel}" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${_kernel}; \ ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} ${IMAKE_INSTALL} KERNEL=${INSTKERNNAME}.${_kernel} ${.TARGET:S/kernel//} .endfor .endif distributekernel distributekernel.debug: .PHONY .if !defined(NO_INSTALLKERNEL) .if empty(INSTALLKERNEL) @echo "ERROR: No kernel \"${KERNCONF}\" to install."; \ false .endif mkdir -p ${DESTDIR}/${DISTDIR} .if defined(NO_ROOT) @echo "#${MTREE_MAGIC}" > ${DESTDIR}/${DISTDIR}/kernel.premeta .endif cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \ ${IMAKEENV} ${IMAKE_INSTALL:S/METALOG/kernel.premeta/} \ ${IMAKE_MTREE} PATH=${TMPPATH} ${MAKE} KERNEL=${INSTKERNNAME} \ DESTDIR=${INSTALL_DDIR}/kernel \ ${.TARGET:S/distributekernel/install/} .if defined(NO_ROOT) @sed -e 's|^./kernel|.|' ${DESTDIR}/${DISTDIR}/kernel.premeta > \ ${DESTDIR}/${DISTDIR}/kernel.meta .endif .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} .if defined(NO_ROOT) @echo "#${MTREE_MAGIC}" > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.premeta .endif cd ${KRNLOBJDIR}/${_kernel}; \ ${IMAKEENV} ${IMAKE_INSTALL:S/METALOG/kernel.${_kernel}.premeta/} \ ${IMAKE_MTREE} PATH=${TMPPATH} ${MAKE} \ KERNEL=${INSTKERNNAME}.${_kernel} \ DESTDIR=${INSTALL_DDIR}/kernel.${_kernel} \ ${.TARGET:S/distributekernel/install/} .if defined(NO_ROOT) @sed -e "s|^./kernel.${_kernel}|.|" \ ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.premeta > \ ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta .endif .endfor .endif packagekernel: .PHONY .if defined(NO_ROOT) .if !defined(NO_INSTALLKERNEL) cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --exclude '*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.txz .endif .if ${MK_DEBUG_FILES} != "no" cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --include '*/*/*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.meta | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel-dbg.txz .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --exclude '*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.${_kernel}.txz .if ${MK_DEBUG_FILES} != "no" cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --include '*/*/*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}-dbg.txz .endif .endfor .endif .else .if !defined(NO_INSTALLKERNEL) cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --exclude '*.debug' . 
| \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.txz .endif .if ${MK_DEBUG_FILES} != "no" cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --include '*/*/*.debug' $$(eval find .) | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel-dbg.txz .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --exclude '*.debug' . | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.${_kernel}.txz .if ${MK_DEBUG_FILES} != "no" cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --include '*/*/*.debug' $$(eval find .) | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}-dbg.txz .endif .endfor .endif .endif stagekernel: .PHONY ${_+_}${MAKE} -C ${.CURDIR} ${.MAKEFLAGS} distributekernel PORTSDIR?= /usr/ports WSTAGEDIR?= ${MAKEOBJDIRPREFIX}${.CURDIR}/${TARGET}.${TARGET_ARCH}/worldstage KSTAGEDIR?= ${MAKEOBJDIRPREFIX}${.CURDIR}/${TARGET}.${TARGET_ARCH}/kernelstage REPODIR?= ${MAKEOBJDIRPREFIX}${.CURDIR}/repo PKGSIGNKEY?= # empty .ORDER: stage-packages create-packages .ORDER: create-packages create-world-packages .ORDER: create-packages create-kernel-packages .ORDER: create-packages sign-packages _pkgbootstrap: .PHONY .if !exists(${LOCALBASE}/sbin/pkg) @env ASSUME_ALWAYS_YES=YES pkg bootstrap .endif packages: .PHONY ${_+_}${MAKE} -C ${.CURDIR} PKG_VERSION=${PKG_VERSION} real-packages package-pkg: .PHONY rm -rf /tmp/ports.${TARGET} || : env ${WMAKEENV:Q} SRCDIR=${.CURDIR} PORTSDIR=${PORTSDIR} REVISION=${_REVISION} \ PKG_CMD=${PKG_CMD} PKG_VERSION=${PKG_VERSION} REPODIR=${REPODIR} \ WSTAGEDIR=${WSTAGEDIR} \ sh ${.CURDIR}/release/scripts/make-pkg-package.sh real-packages: stage-packages create-packages sign-packages .PHONY stage-packages-world: .PHONY @mkdir -p ${WSTAGEDIR} ${_+_}@cd ${.CURDIR}; \ ${MAKE} DESTDIR=${WSTAGEDIR} -DNO_ROOT stageworld stage-packages-kernel: .PHONY @mkdir -p ${KSTAGEDIR} ${_+_}@cd ${.CURDIR}; \ ${MAKE} DESTDIR=${KSTAGEDIR} -DNO_ROOT stagekernel stage-packages: .PHONY stage-packages-world stage-packages-kernel _repodir: .PHONY @mkdir -p ${REPODIR} create-packages-world: _pkgbootstrap _repodir .PHONY ${_+_}@cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 \ DESTDIR=${WSTAGEDIR} \ PKG_VERSION=${PKG_VERSION} create-world-packages create-packages-kernel: _pkgbootstrap _repodir .PHONY ${_+_}@cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 \ DESTDIR=${KSTAGEDIR} \ PKG_VERSION=${PKG_VERSION} DISTDIR=kernel \ create-kernel-packages create-packages: .PHONY create-packages-world create-packages-kernel create-world-packages: _pkgbootstrap .PHONY @rm -f ${WSTAGEDIR}/*.plist 2>/dev/null || : @cd ${WSTAGEDIR} ; \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \ ${WSTAGEDIR}/METALOG @for plist in ${WSTAGEDIR}/*.plist; do \ plist=$${plist##*/} ; \ pkgname=$${plist%.plist} ; \ echo "_PKGS+= $${pkgname}" ; \ done > ${WSTAGEDIR}/packages.mk ${_+_}@cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 create-world-packages-jobs \ .MAKE.JOB.PREFIX= .if make(create-world-packages-jobs) .include "${WSTAGEDIR}/packages.mk" .endif create-world-packages-jobs: .PHONY .for pkgname in ${_PKGS} create-world-packages-jobs: create-world-package-${pkgname} create-world-package-${pkgname}: .PHONY @sh ${SRCDIR}/release/packages/generate-ucl.sh -o ${pkgname} \ -s ${SRCDIR} -u ${WSTAGEDIR}/${pkgname}.ucl @awk -F\" ' \ /^name/ { printf("===> Creating %s-", $$2); next } \ /^version/ { print $$2; next } \ ' ${WSTAGEDIR}/${pkgname}.ucl @if [ "${pkgname}" == "runtime" ]; then \ sed -i '' -e "s/%VCS_REVISION%/${VCS_REVISION}/" ${WSTAGEDIR}/${pkgname}.ucl ; \ fi 
${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${WSTAGEDIR}/${pkgname}.ucl \ -p ${WSTAGEDIR}/${pkgname}.plist \ -r ${WSTAGEDIR} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} .endfor create-kernel-packages: .PHONY _default_flavor= -default .if exists(${KSTAGEDIR}/kernel.meta) . if ${MK_DEBUG_FILES} != "no" _debug=-debug . endif . for flavor in "" ${_debug} create-kernel-packages: create-kernel-packages-flavor${flavor:C,^""$,${_default_flavor},} create-kernel-packages-flavor${flavor:C,^""$,${_default_flavor},}: _pkgbootstrap .PHONY @cd ${KSTAGEDIR}/${DISTDIR} ; \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \ -v kernel=yes -v _kernconf=${INSTALLKERNEL} \ ${KSTAGEDIR}/kernel.meta ; \ cap_arg=`cd ${SRCDIR}/etc ; ${MAKE} -VCAP_MKDB_ENDIAN` ; \ pwd_arg=`cd ${SRCDIR}/etc ; ${MAKE} -VPWD_MKDB_ENDIAN` ; \ sed -e "s/%VERSION%/${PKG_VERSION}/" \ -e "s/%PKGNAME%/kernel-${INSTALLKERNEL:tl}${flavor}/" \ -e "s/%COMMENT%/FreeBSD ${INSTALLKERNEL} kernel ${flavor}/" \ -e "s/%DESC%/FreeBSD ${INSTALLKERNEL} kernel ${flavor}/" \ -e "s/%CAP_MKDB_ENDIAN%/$${cap_arg}/g" \ -e "s/%PWD_MKDB_ENDIAN%/$${pwd_arg}/g" \ -e "s/ %VCS_REVISION%/${VCS_REVISION}/" \ ${SRCDIR}/release/packages/kernel.ucl \ > ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl ; \ awk -F\" ' \ /name/ { printf("===> Creating %s-", $$2); next } \ /version/ {print $$2; next } ' \ ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl \ -p ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.plist \ -r ${KSTAGEDIR}/${DISTDIR} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} . endfor .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" . for _kernel in ${BUILDKERNELS:[2..-1]} . if exists(${KSTAGEDIR}/kernel.${_kernel}.meta) . if ${MK_DEBUG_FILES} != "no" _debug=-debug . endif . for flavor in "" ${_debug} create-kernel-packages: create-kernel-packages-extra-flavor${flavor:C,^""$,${_default_flavor},}-${_kernel} create-kernel-packages-extra-flavor${flavor:C,^""$,${_default_flavor},}-${_kernel}: _pkgbootstrap .PHONY @cd ${KSTAGEDIR}/kernel.${_kernel} ; \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \ -v kernel=yes -v _kernconf=${_kernel} \ ${KSTAGEDIR}/kernel.${_kernel}.meta ; \ cap_arg=`cd ${SRCDIR}/etc ; ${MAKE} -VCAP_MKDB_ENDIAN` ; \ pwd_arg=`cd ${SRCDIR}/etc ; ${MAKE} -VPWD_MKDB_ENDIAN` ; \ sed -e "s/%VERSION%/${PKG_VERSION}/" \ -e "s/%PKGNAME%/kernel-${_kernel:tl}${flavor}/" \ -e "s/%COMMENT%/FreeBSD ${_kernel} kernel ${flavor}/" \ -e "s/%DESC%/FreeBSD ${_kernel} kernel ${flavor}/" \ -e "s/%CAP_MKDB_ENDIAN%/$${cap_arg}/g" \ -e "s/%PWD_MKDB_ENDIAN%/$${pwd_arg}/g" \ -e "s/ %VCS_REVISION%/${VCS_REVISION}/" \ ${SRCDIR}/release/packages/kernel.ucl \ > ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl ; \ awk -F\" ' \ /name/ { printf("===> Creating %s-", $$2); next } \ /version/ {print $$2; next } ' \ ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl \ -p ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.plist \ -r ${KSTAGEDIR}/kernel.${_kernel} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} . endfor . endif . 
endfor
.endif

sign-packages: _pkgbootstrap .PHONY
	@[ -L "${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/latest" ] && \
	    unlink ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/latest ; \
	${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh repo \
	    -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} \
	    ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} \
	    ${PKGSIGNKEY} ; \
	cd ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI); \
	ln -s ${PKG_VERSION} latest

#
#
# checkworld
#
# Run test suite on installed world.
#
checkworld: .PHONY
	@if [ ! -x "${LOCALBASE}/bin/kyua" ]; then \
		echo "You need kyua (devel/kyua) to run the test suite." | /usr/bin/fmt; \
		exit 1; \
	fi
	${_+_}PATH="$$PATH:${LOCALBASE}/bin" kyua test -k ${TESTSBASE}/Kyuafile

#
#
# doxygen
#
# Build the API documentation with doxygen
#
doxygen: .PHONY
	@if [ ! -x "${LOCALBASE}/bin/doxygen" ]; then \
		echo "You need doxygen (devel/doxygen) to generate the API documentation of the kernel." | /usr/bin/fmt; \
		exit 1; \
	fi
	${_+_}cd ${.CURDIR}/tools/kerneldoc/subsys; ${MAKE} obj all

#
# update
#
# Update the source tree(s) by running svn/svnup to update to the
# latest copy.
#
update: .PHONY
.if defined(SVN_UPDATE)
	@echo "--------------------------------------------------------------"
	@echo ">>> Updating ${.CURDIR} using Subversion"
	@echo "--------------------------------------------------------------"
	@(cd ${.CURDIR}; ${SVN} update ${SVNFLAGS})
.endif

#
# ------------------------------------------------------------------------
#
# From here onwards are utility targets used by the 'make world' and
# related targets. If your 'world' breaks, you may like to try to fix
# the problem and manually run the following targets to attempt to
# complete the build. Beware, this is *not* guaranteed to work, you
# need to have a pretty good grip on the current state of the system
# to attempt to manually finish it. If in doubt, 'make world' again.
#

#
# legacy: Build compatibility shims for the next three targets. This is a
# minimal set of tools and shims necessary to compensate for older systems
# which don't have the APIs required by the targets built in bootstrap-tools,
# build-tools or cross-tools.
#

# ELF Tool Chain libraries are needed for ELF tools and dtrace tools.
# r296685 fix cross-endian objcopy
# r310724 fixed PR 215350, a crash in libdwarf with objects built by GCC 6.2.
.if ${BOOTSTRAPPING} < 1200020
_elftoolchain_libs= lib/libelf lib/libdwarf
.endif

legacy: .PHONY
# Temporary special case for automatically detecting the clang compiler issue
# Note: 9.x didn't have FreeBSD_version bumps often enough, so you may need to
# set BOOTSTRAPPING to 0 if your stable/9 tree post-dates r286035 but is before
# the version bump in r296219 (from July 29, 2015 -> Feb 29, 2016).
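# For example, a hypothetical override forcing the full bootstrap on such a
# tree (BOOTSTRAPPING is normally derived from the host's osreldate):
#   make buildworld BOOTSTRAPPING=0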
.if ${BOOTSTRAPPING} != 0 && \ ${WANT_COMPILER_TYPE} == "clang" && ${COMPILER_TYPE} == "clang" && ${COMPILER_VERSION} < 30601 .if ${BOOTSTRAPPING} > 10000000 && ${BOOTSTRAPPING} < 1002501 @echo "ERROR: Source upgrades from stable/10 prior to r286033 are not supported."; false .elif ${BOOTSTRAPPING} > 9000000 && ${BOOTSTRAPPING} < 903509 @echo "ERROR: Source upgrades from stable/9 prior to r286035 are not supported."; false .endif .endif .if ${BOOTSTRAPPING} < ${MINIMUM_SUPPORTED_OSREL} && ${BOOTSTRAPPING} != 0 @echo "ERROR: Source upgrades from versions prior to ${MINIMUM_SUPPORTED_REL} are not supported."; \ false .endif .for _tool in tools/build ${_elftoolchain_libs} ${_+_}@${ECHODIR} "===> ${_tool} (obj,includes,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX}/legacy includes; \ ${MAKE} DIRPRFX=${_tool}/ MK_INCLUDES=no all; \ ${MAKE} DIRPRFX=${_tool}/ MK_INCLUDES=no \ DESTDIR=${MAKEOBJDIRPREFIX}/legacy install .endfor # # bootstrap-tools: Build tools needed for compatibility. These are binaries that # are built to build other binaries in the system. However, the focus of these # binaries is usually quite narrow. Bootstrap tools use the host's compiler and # libraries, augmented by -legacy. # _bt= _bootstrap-tools .if ${MK_GAMES} != "no" _strfile= usr.bin/fortune/strfile .endif .if ${MK_GCC} != "no" && ${MK_CXX} != "no" _gperf= gnu/usr.bin/gperf .endif .if ${MK_VT} != "no" _vtfontcvt= usr.bin/vtfontcvt .endif .if ${BOOTSTRAPPING} < 1000033 _m4= usr.bin/m4 _lex= usr.bin/lex ${_bt}-usr.bin/m4: ${_bt}-lib/libopenbsd ${_bt}-usr.bin/lex: ${_bt}-usr.bin/m4 .endif # r245440 mtree -N support added # r313404 requires sha384.h for libnetbsd, added to libmd in r292782 .if ${BOOTSTRAPPING} < 1100093 _nmtree= lib/libmd \ lib/libnetbsd \ usr.sbin/nmtree ${_bt}-lib/libnetbsd: ${_bt}-lib/libmd ${_bt}-usr.sbin/nmtree: ${_bt}-lib/libnetbsd .endif # r246097: log addition login.conf.db, passwd, pwd.db, and spwd.db with cat -l .if ${BOOTSTRAPPING} < 1000027 _cat= bin/cat .endif # r277259 crunchide: Correct 64-bit section header offset # r281674 crunchide: always include both 32- and 64-bit ELF support .if ${BOOTSTRAPPING} < 1100078 _crunchide= usr.sbin/crunch/crunchide .endif # r285986 crunchen: use STRIPBIN rather than STRIP # 1100113: Support MK_AUTO_OBJ # 1200006: META_MODE fixes .if ${BOOTSTRAPPING} < 1100078 || \ (${MK_AUTO_OBJ} == "yes" && ${BOOTSTRAPPING} < 1100114) || \ (${MK_META_MODE} == "yes" && ${BOOTSTRAPPING} < 1200006) _crunchgen= usr.sbin/crunch/crunchgen .endif # r296926 -P keymap search path, MFC to stable/10 in r298297 .if ${BOOTSTRAPPING} < 1003501 || \ (${BOOTSTRAPPING} >= 1100000 && ${BOOTSTRAPPING} < 1100103) _kbdcontrol= usr.sbin/kbdcontrol .endif _yacc= lib/liby \ usr.bin/yacc ${_bt}-usr.bin/yacc: ${_bt}-lib/liby .if ${MK_BSNMP} != "no" _gensnmptree= usr.sbin/bsnmpd/gensnmptree .endif # We need to build tblgen when we're building clang or lld, either as # bootstrap tools, or as the part of the normal build. .if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_CLANG} != "no" || \ ${MK_LLD_BOOTSTRAP} != "no" || ${MK_LLD} != "no" _clang_tblgen= \ lib/clang/libllvmminimal \ usr.bin/clang/llvm-tblgen \ usr.bin/clang/clang-tblgen ${_bt}-usr.bin/clang/clang-tblgen: ${_bt}-lib/clang/libllvmminimal ${_bt}-usr.bin/clang/llvm-tblgen: ${_bt}-lib/clang/libllvmminimal .endif # Default to building the GPL DTC, but build the BSDL one if users explicitly # request it. 
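# For example (illustrative; the knob tested below is MK_GPL_DTC, driven by
# the usual WITH_/WITHOUT_ option machinery in src.opts.mk):
#   make buildworld -DWITHOUT_GPL_DTC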
_dtc= usr.bin/dtc .if ${MK_GPL_DTC} != "no" _dtc= gnu/usr.bin/dtc .endif .if ${MK_KERBEROS} != "no" _kerberos5_bootstrap_tools= \ kerberos5/tools/make-roken \ kerberos5/lib/libroken \ kerberos5/lib/libvers \ kerberos5/tools/asn1_compile \ kerberos5/tools/slc \ usr.bin/compile_et .ORDER: ${_kerberos5_bootstrap_tools:C/^/${_bt}-/g} .endif ${_bt}-usr.bin/mandoc: ${_bt}-lib/libopenbsd bootstrap-tools: .PHONY # Please document (add comment) why something is in 'bootstrap-tools'. # Try to bound the building of the bootstrap-tool to just the # FreeBSD versions that need the tool built at this stage of the build. .for _tool in \ ${_clang_tblgen} \ ${_kerberos5_bootstrap_tools} \ ${_strfile} \ ${_gperf} \ ${_dtc} \ ${_cat} \ ${_kbdcontrol} \ usr.bin/lorder \ lib/libopenbsd \ usr.bin/mandoc \ usr.bin/rpcgen \ ${_yacc} \ ${_m4} \ ${_lex} \ usr.bin/xinstall \ ${_gensnmptree} \ usr.sbin/config \ ${_crunchide} \ ${_crunchgen} \ ${_nmtree} \ ${_vtfontcvt} \ usr.bin/localedef ${_bt}-${_tool}: .PHONY .MAKE ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ all; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX}/legacy install bootstrap-tools: ${_bt}-${_tool} .endfor # # build-tools: Build special purpose build tools # .if !defined(NO_SHARE) _share= share/syscons/scrnmaps .endif .if ${MK_GCC} != "no" _gcc_tools= gnu/usr.bin/cc/cc_tools .endif .if ${MK_RESCUE} != "no" # rescue includes programs that have build-tools targets _rescue=rescue/rescue .endif .if ${MK_TCSH} != "no" _tcsh=bin/csh .endif .for _tool in \ ${_tcsh} \ bin/sh \ ${LOCAL_TOOL_DIRS} \ lib/ncurses/ncurses \ lib/ncurses/ncursesw \ ${_rescue} \ ${_share} \ usr.bin/awk \ lib/libmagic \ usr.bin/mkesdb_static \ usr.bin/mkcsmapper_static \ usr.bin/vi/catalog build-tools_${_tool}: .PHONY ${_+_}@${ECHODIR} "===> ${_tool} (obj,build-tools)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ build-tools build-tools: build-tools_${_tool} .endfor .for _tool in \ ${_gcc_tools} build-tools_${_tool}: .PHONY ${_+_}@${ECHODIR} "===> ${_tool} (obj,all)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ all build-tools: build-tools_${_tool} .endfor # # kernel-tools: Build kernel-building tools # kernel-tools: .PHONY mkdir -p ${MAKEOBJDIRPREFIX}/usr mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${MAKEOBJDIRPREFIX}/usr >/dev/null # # cross-tools: All the tools needed to build the rest of the system after # we get done with the earlier stages. It is the last set of tools needed # to begin building the target binaries. # .if ${TARGET_ARCH} != ${MACHINE_ARCH} .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" _btxld= usr.sbin/btxld .endif .endif # Rebuild ctfconvert and ctfmerge to avoid difficult-to-diagnose failures # resulting from missing bug fixes or ELF Toolchain updates. .if ${MK_CDDL} != "no" _dtrace_tools= cddl/lib/libctf cddl/usr.bin/ctfconvert \ cddl/usr.bin/ctfmerge .endif # If we're given an XAS, don't build binutils. 
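# For example, a hypothetical external-assembler setup (the path is
# illustrative; only an absolute XAS path skips the binutils bootstrap below):
#   make buildworld XAS=/usr/local/bin/mips-freebsd-as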
.if ${XAS:M/*} == ""
.if ${MK_BINUTILS_BOOTSTRAP} != "no"
_binutils= gnu/usr.bin/binutils
.endif
.if ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no"
_elftctools= lib/libelftc \
    lib/libpe \
    usr.bin/elfcopy \
    usr.bin/nm \
    usr.bin/size \
    usr.bin/strings
# These are not required by the build, but can be useful for developers who
# cross-build on a FreeBSD 10 host:
_elftctools+= usr.bin/addr2line
.endif
.elif ${TARGET_ARCH} != ${MACHINE_ARCH} && ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no"
# If cross-building with an external binutils we still need to build strip for
# the target (for at least crunchide).
_elftctools= lib/libelftc \
    lib/libpe \
    usr.bin/elfcopy
.endif
.if ${MK_CLANG_BOOTSTRAP} != "no"
_clang= usr.bin/clang
.endif
.if ${MK_LLD_BOOTSTRAP} != "no"
_lld= usr.bin/clang/lld
.endif
.if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_LLD_BOOTSTRAP} != "no"
_clang_libs= lib/clang
.endif
.if ${MK_GCC_BOOTSTRAP} != "no"
_gcc= gnu/usr.bin/cc
.endif
.if ${MK_USB} != "no"
_usb_tools= sys/boot/usb/tools
.endif

cross-tools: .MAKE .PHONY
.for _tool in \
    ${LOCAL_XTOOL_DIRS} \
    ${_clang_libs} \
    ${_clang} \
    ${_lld} \
    ${_binutils} \
    ${_elftctools} \
    ${_dtrace_tools} \
    ${_gcc} \
    ${_btxld} \
    ${_usb_tools}
	${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \
		cd ${.CURDIR}/${_tool}; \
		if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \
		${MAKE} DIRPRFX=${_tool}/ all; \
		${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX} install
.endfor

#
# native-xtools is the current target for qemu-user cross builds of ports
# via poudriere and the imgact_binmisc kernel module.
# This target merely builds a toolchain/sysroot, then builds the tools it wants
# with the options it wants in a special MAKEOBJDIRPREFIX, using the toolchain
# already built. It then installs the static tools to NXBDESTDIR for Poudriere
# to pick up.
#
NXBOBJDIR= ${MAKEOBJDIRPREFIX}/nxb/${TARGET}.${TARGET_ARCH}
NXTP?= /nxb-bin
.if ${NXTP:N/*}
.error NXTP variable should be an absolute path
.endif
NXBDESTDIR?= ${DESTDIR}${NXTP}
# This is the list of tools to be built/installed as static and where
# appropriate to build for the given TARGET.TARGET_ARCH.
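# A typical sequence for a poudriere jail might look like this (paths and
# TARGET values are illustrative):
#   make native-xtools TARGET=mips TARGET_ARCH=mips64
#   make native-xtools-install DESTDIR=/poudriere/jails/mips64
# which installs the static tools under ${NXTP} (/nxb-bin by default) inside
# that DESTDIR.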
NXBDIRS+= \ bin/cat \ bin/chmod \ bin/cp \ ${_tcsh} \ bin/echo \ bin/expr \ bin/hostname \ bin/ln \ bin/ls \ bin/mkdir \ bin/mv \ bin/ps \ bin/realpath \ bin/rm \ bin/rmdir \ bin/sh \ bin/sleep \ sbin/md5 \ sbin/sysctl \ usr.bin/addr2line \ usr.bin/ar \ usr.bin/awk \ usr.bin/basename \ usr.bin/bmake \ usr.bin/bzip2 \ usr.bin/cmp \ usr.bin/diff \ usr.bin/dirname \ usr.bin/elfcopy \ usr.bin/env \ usr.bin/fetch \ usr.bin/find \ usr.bin/grep \ usr.bin/gzip \ usr.bin/id \ usr.bin/lex \ usr.bin/limits \ usr.bin/lorder \ usr.bin/mandoc \ usr.bin/mktemp \ usr.bin/mt \ usr.bin/nm \ usr.bin/patch \ usr.bin/readelf \ usr.bin/sed \ usr.bin/size \ usr.bin/sort \ usr.bin/strings \ usr.bin/tar \ usr.bin/touch \ usr.bin/tr \ usr.bin/true \ usr.bin/uniq \ usr.bin/unzip \ usr.bin/xargs \ usr.bin/xinstall \ usr.bin/xz \ usr.bin/yacc \ usr.sbin/chown .if ${MK_CLANG} != "no" NXBDIRS+= lib/clang SUBDIR_DEPEND_usr.bin/clang= lib/clang NXBDIRS+= usr.bin/clang .elif ${MK_GCC} != "no" NXBDIRS+= gnu/usr.bin/cc .endif .if ${MK_BINUTILS} != "no" NXBDIRS+= gnu/usr.bin/binutils .endif NXBMAKEENV+= \ MAKEOBJDIRPREFIX=${NXBOBJDIR:Q} NXBMAKEARGS+= \ OBJTREE=${NXBOBJDIR:Q} \ -DNO_SHARED \ -DNO_CPU_CFLAGS \ -DNO_PIC \ SSP_CFLAGS= \ MK_CLANG_EXTRAS=no \ MK_CLANG_FULL=no \ MK_CTF=no \ MK_DEBUG_FILES=no \ MK_GDB=no \ MK_HTML=no \ MK_LLDB=no \ MK_MAN=no \ MK_MAN_UTILS=yes \ MK_OFED=no \ MK_OPENSSH=no \ MK_PROFILE=no \ MK_SENDMAIL=no \ MK_SVNLITE=no \ MK_TESTS=no \ MK_WARNS=no \ MK_ZFS=no # For 'toolchain' we want to produce native binaries that themselves generate # native binaries. NXBTMAKE= ${NXBMAKEENV} ${MAKE} ${NXBMAKEARGS:N-DNO_PIC:N-DNO_SHARED} \ TARGET=${MACHINE} TARGET_ARCH=${MACHINE_ARCH} # For 'everything' we want to produce native binaries (hence -target to # be MACHINE) that themselves generate TARGET.TARGET_ARCH binaries. # TARGET/TARGET_ARCH are still passed along from user. NXBMAKE= ${NXBMAKEENV} ${MAKE} ${NXBMAKEARGS} \ TARGET_TRIPLE=${MACHINE_TRIPLE:Q} native-xtools: .PHONY # Build the bootstrap/host/cross tools that produce native binaries ${_+_}cd ${.CURDIR}; ${NXBTMAKE} kernel-toolchain # Populate includes/libraries sysroot that produce native binaries. # This is split out from 'toolchain' above mostly so that target LLVM # libraries have a proper LLVM_DEFAULT_TARGET_TRIPLE without # polluting the cross-compiler build. The LLVM/GCC libs are skipped # here to avoid the problem but are kept in 'toolchain' so that # needed build tools are built. ${_+_}cd ${.CURDIR}; ${NXBTMAKE} _includes MK_CLANG=no MK_GCC=no ${_+_}cd ${.CURDIR}; ${NXBTMAKE} _libraries MK_CLANG=no MK_GCC=no .if !defined(NO_OBJ) ${_+_}cd ${.CURDIR}; ${NXBMAKE} SUBDIR_OVERRIDE="${NXBDIRS:M*}" _obj .endif ${_+_}cd ${.CURDIR}; ${NXBMAKE} SUBDIR_OVERRIDE="${NXBDIRS:M*}" \ everything @echo ">> native-xtools done. Use 'make native-xtools-install' to install to a given DESTDIR" native-xtools-install: .PHONY mkdir -p ${NXBDESTDIR}/bin ${NXBDESTDIR}/sbin ${NXBDESTDIR}/usr mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${NXBDESTDIR}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${NXBDESTDIR}/usr/include >/dev/null ${_+_}cd ${.CURDIR}; ${NXBMAKE} -f Makefile.inc1 \ DESTDIR=${NXBDESTDIR} \ SUBDIR_OVERRIDE="${NXBDIRS:M*}" \ -DNO_ROOT \ install # # hierarchy - ensure that all the needed directories are present # hierarchy hier: .MAKE .PHONY ${_+_}cd ${.CURDIR}/etc; ${HMAKE} distrib-dirs # # libraries - build all libraries, and install them under ${DESTDIR}. 
# # The list of libraries with dependents (${_prebuild_libs}) and their # interdependencies (__L) are built automatically by the # ${.CURDIR}/tools/make_libdeps.sh script. # libraries: .MAKE .PHONY ${_+_}cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 _prereq_libs; \ ${MAKE} -f Makefile.inc1 _startup_libs; \ ${MAKE} -f Makefile.inc1 _prebuild_libs; \ ${MAKE} -f Makefile.inc1 _generic_libs # # static libgcc.a prerequisite for shared libc # _prereq_libs= lib/libcompiler_rt .if ${MK_SSP} != "no" _prereq_libs+= gnu/lib/libssp/libssp_nonshared .endif # # The coverage libraries must be built for the target prior to ${_startup_libs} # for world to have runtime coverage instrumentation. # .if ${MK_COVERAGE} != "no" .if defined(WANT_COMPILER) && ${WANT_COMPILER} == "clang" _coverage_libs+= lib/libclang_rt/profile .endif .endif .if !empty(_coverage_libs) _prereq_libs+= ${_coverage_libs} .endif # These dependencies are not automatically generated: # # gnu/lib/csu, gnu/lib/libgcc, lib/csu and lib/libc must be built before # all shared libraries for ELF. # _startup_libs= gnu/lib/csu _startup_libs+= lib/csu _startup_libs+= lib/libcompiler_rt _startup_libs+= lib/libc _startup_libs+= lib/libc_nonshared .if ${MK_LIBCPLUSPLUS} != "no" _startup_libs+= lib/libcxxrt .endif .if ${MK_LLVM_LIBUNWIND} != "no" _prereq_libs+= lib/libgcc_eh lib/libgcc_s _startup_libs+= lib/libgcc_eh lib/libgcc_s lib/libgcc_s__L: lib/libc__L lib/libgcc_s__L: lib/libc_nonshared__L .if ${MK_LIBCPLUSPLUS} != "no" lib/libcxxrt__L: lib/libgcc_s__L .endif .else # MK_LLVM_LIBUNWIND == no _prereq_libs+= gnu/lib/libgcc _startup_libs+= gnu/lib/libgcc gnu/lib/libgcc__L: lib/libc__L gnu/lib/libgcc__L: lib/libc_nonshared__L .if ${MK_LIBCPLUSPLUS} != "no" lib/libcxxrt__L: gnu/lib/libgcc__L .endif .endif _prebuild_libs= ${_kerberos5_lib_libasn1} \ ${_kerberos5_lib_libhdb} \ ${_kerberos5_lib_libheimbase} \ ${_kerberos5_lib_libheimntlm} \ ${_libsqlite3} \ ${_kerberos5_lib_libheimipcc} \ ${_kerberos5_lib_libhx509} ${_kerberos5_lib_libkrb5} \ ${_kerberos5_lib_libroken} \ ${_kerberos5_lib_libwind} \ lib/libbz2 ${_libcom_err} lib/libcrypt \ lib/libelf lib/libexpat \ lib/libfigpar \ ${_lib_libgssapi} \ lib/libkiconv lib/libkvm lib/liblzma lib/libmd lib/libnv \ ${_lib_casper} \ lib/ncurses/ncurses lib/ncurses/ncursesw \ lib/libopie lib/libpam/libpam ${_lib_libthr} \ ${_lib_libradius} lib/libsbuf lib/libtacplus \ lib/libgeom \ ${_cddl_lib_libumem} ${_cddl_lib_libnvpair} \ ${_cddl_lib_libuutil} \ ${_cddl_lib_libavl} \ ${_cddl_lib_libzfs_core} \ ${_cddl_lib_libctf} \ lib/libutil lib/libpjdlog ${_lib_libypclnt} lib/libz lib/msun \ ${_secure_lib_libcrypto} ${_lib_libldns} \ ${_secure_lib_libssh} ${_secure_lib_libssl} .if ${MK_GNUCXX} != "no" _prebuild_libs+= gnu/lib/libstdc++ gnu/lib/libsupc++ gnu/lib/libstdc++__L: lib/msun__L gnu/lib/libsupc++__L: gnu/lib/libstdc++__L .endif .if ${MK_DIALOG} != "no" _prebuild_libs+= gnu/lib/libdialog gnu/lib/libdialog__L: lib/msun__L lib/ncurses/ncursesw__L .endif .if ${MK_LIBCPLUSPLUS} != "no" _prebuild_libs+= lib/libc++ .endif lib/libgeom__L: lib/libexpat__L lib/libkvm__L: lib/libelf__L .if ${MK_LIBTHR} != "no" _lib_libthr= lib/libthr .endif .if ${MK_RADIUS_SUPPORT} != "no" _lib_libradius= lib/libradius .endif .if ${MK_OFED} != "no" _ofed_lib= contrib/ofed/usr.lib _prebuild_libs+= contrib/ofed/usr.lib/libosmcomp _prebuild_libs+= contrib/ofed/usr.lib/libopensm _prebuild_libs+= contrib/ofed/usr.lib/libibcommon _prebuild_libs+= contrib/ofed/usr.lib/libibverbs _prebuild_libs+= contrib/ofed/usr.lib/libibumad 
contrib/ofed/usr.lib/libopensm__L: lib/libthr__L contrib/ofed/usr.lib/libosmcomp__L: lib/libthr__L contrib/ofed/usr.lib/libibumad__L: contrib/ofed/usr.lib/libibcommon__L .endif .if ${MK_CASPER} != "no" _lib_casper= lib/libcasper .endif lib/libpjdlog__L: lib/libutil__L lib/libcasper__L: lib/libnv__L lib/liblzma__L: lib/libthr__L _generic_libs= ${_cddl_lib} gnu/lib ${_kerberos5_lib} lib ${_secure_lib} usr.bin/lex/lib ${_ofed_lib} .for _DIR in ${LOCAL_LIB_DIRS} .if exists(${.CURDIR}/${_DIR}/Makefile) && empty(_generic_libs:M${_DIR}) _generic_libs+= ${_DIR} .endif .endfor lib/libopie__L lib/libtacplus__L: lib/libmd__L .if ${MK_CDDL} != "no" _cddl_lib_libumem= cddl/lib/libumem _cddl_lib_libnvpair= cddl/lib/libnvpair _cddl_lib_libavl= cddl/lib/libavl _cddl_lib_libuutil= cddl/lib/libuutil .if ${MK_ZFS} != "no" _cddl_lib_libzfs_core= cddl/lib/libzfs_core cddl/lib/libzfs_core__L: cddl/lib/libnvpair__L .endif _cddl_lib_libctf= cddl/lib/libctf _cddl_lib= cddl/lib cddl/lib/libctf__L: lib/libz__L .endif # cddl/lib/libdtrace requires lib/libproc and lib/librtld_db; it's only built # on select architectures though (see cddl/lib/Makefile) .if ${MACHINE_CPUARCH} != "sparc64" _prebuild_libs+= lib/libprocstat lib/libproc lib/librtld_db lib/libprocstat__L: lib/libelf__L lib/libkvm__L lib/libutil__L lib/libproc__L: lib/libprocstat__L lib/librtld_db__L: lib/libprocstat__L .endif .if ${MK_CRYPT} != "no" .if ${MK_OPENSSL} != "no" _secure_lib_libcrypto= secure/lib/libcrypto _secure_lib_libssl= secure/lib/libssl lib/libradius__L secure/lib/libssl__L: secure/lib/libcrypto__L .if ${MK_LDNS} != "no" _lib_libldns= lib/libldns lib/libldns__L: secure/lib/libcrypto__L .endif .if ${MK_OPENSSH} != "no" _secure_lib_libssh= secure/lib/libssh secure/lib/libssh__L: lib/libz__L secure/lib/libcrypto__L lib/libcrypt__L .if ${MK_LDNS} != "no" secure/lib/libssh__L: lib/libldns__L .endif .if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no" secure/lib/libssh__L: lib/libgssapi__L kerberos5/lib/libkrb5__L \ kerberos5/lib/libhx509__L kerberos5/lib/libasn1__L lib/libcom_err__L \ lib/libmd__L kerberos5/lib/libroken__L .endif .endif .endif _secure_lib= secure/lib .endif .if ${MK_KERBEROS} != "no" kerberos5/lib/libasn1__L: lib/libcom_err__L kerberos5/lib/libroken__L kerberos5/lib/libhdb__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ kerberos5/lib/libkrb5__L kerberos5/lib/libroken__L \ kerberos5/lib/libwind__L lib/libsqlite3__L kerberos5/lib/libheimntlm__L: secure/lib/libcrypto__L kerberos5/lib/libkrb5__L \ kerberos5/lib/libroken__L lib/libcom_err__L kerberos5/lib/libhx509__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ secure/lib/libcrypto__L kerberos5/lib/libroken__L kerberos5/lib/libwind__L kerberos5/lib/libkrb5__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ lib/libcrypt__L secure/lib/libcrypto__L kerberos5/lib/libhx509__L \ kerberos5/lib/libroken__L kerberos5/lib/libwind__L \ kerberos5/lib/libheimbase__L kerberos5/lib/libheimipcc__L kerberos5/lib/libroken__L: lib/libcrypt__L kerberos5/lib/libwind__L: kerberos5/lib/libroken__L lib/libcom_err__L kerberos5/lib/libheimbase__L: lib/libthr__L kerberos5/lib/libheimipcc__L: kerberos5/lib/libroken__L kerberos5/lib/libheimbase__L lib/libthr__L .endif lib/libsqlite3__L: lib/libthr__L .if ${MK_GSSAPI} != "no" _lib_libgssapi= lib/libgssapi .endif .if ${MK_KERBEROS} != "no" _kerberos5_lib= kerberos5/lib _kerberos5_lib_libasn1= kerberos5/lib/libasn1 _kerberos5_lib_libhdb= kerberos5/lib/libhdb _kerberos5_lib_libheimbase= kerberos5/lib/libheimbase _kerberos5_lib_libkrb5= kerberos5/lib/libkrb5 
_kerberos5_lib_libhx509= kerberos5/lib/libhx509 _kerberos5_lib_libroken= kerberos5/lib/libroken _kerberos5_lib_libheimntlm= kerberos5/lib/libheimntlm _libsqlite3= lib/libsqlite3 _kerberos5_lib_libheimipcc= kerberos5/lib/libheimipcc _kerberos5_lib_libwind= kerberos5/lib/libwind _libcom_err= lib/libcom_err .endif .if ${MK_NIS} != "no" _lib_libypclnt= lib/libypclnt .endif .if ${MK_OPENSSL} == "no" lib/libradius__L: lib/libmd__L .endif lib/libproc__L: \ ${_cddl_lib_libctf:D${_cddl_lib_libctf}__L} lib/libelf__L lib/librtld_db__L lib/libutil__L .if ${MK_CXX} != "no" .if ${MK_LIBCPLUSPLUS} != "no" lib/libproc__L: lib/libcxxrt__L .else # This implies MK_GNUCXX != "no"; see lib/libproc lib/libproc__L: gnu/lib/libsupc++__L .endif .endif .for _lib in ${_prereq_libs} ${_lib}__PL: .PHONY .MAKE .if exists(${.CURDIR}/${_lib}) ${_+_}@${ECHODIR} "===> ${_lib} (obj,all,install)"; \ cd ${.CURDIR}/${_lib}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ obj; fi; \ ${MAKE} MK_TESTS=no MK_PROFILE=no -DNO_PIC \ DIRPRFX=${_lib}/ all; \ ${MAKE} MK_COVERAGE=no MK_TESTS=no MK_PROFILE=no -DNO_PIC \ DIRPRFX=${_lib}/ install .endif .endfor .for _lib in ${_startup_libs} ${_prebuild_libs} ${_generic_libs} ${_lib}__L: .PHONY .MAKE .if exists(${.CURDIR}/${_lib}) ${_+_}@${ECHODIR} "===> ${_lib} (obj,all,install)"; \ cd ${.CURDIR}/${_lib}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ obj; fi; \ ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ all; \ ${MAKE} MK_COVERAGE=no MK_TESTS=no DIRPRFX=${_lib}/ install .endif .endfor _prereq_libs: ${_prereq_libs:S/$/__PL/} _startup_libs: ${_startup_libs:S/$/__L/} _prebuild_libs: ${_prebuild_libs:S/$/__L/} _generic_libs: ${_generic_libs:S/$/__L/} # Enable SUBDIR_PARALLEL when not calling 'make all', unless called from # 'everything' with _PARALLEL_SUBDIR_OK set. This is because it is unlikely # that running 'make all' from the top-level, especially with a SUBDIR_OVERRIDE # or LOCAL_DIRS set, will have a reliable build if SUBDIRs are built in # parallel. This is safe for the world stage of buildworld though since it has # already built libraries in a proper order and installed includes into # WORLDTMP. Special handling is done for SUBDIR ordering for 'install*' to # avoid trashing a system if it crashes mid-install. .if !make(all) || defined(_PARALLEL_SUBDIR_OK) SUBDIR_PARALLEL= .endif .include .if make(check-old) || make(check-old-dirs) || \ make(check-old-files) || make(check-old-libs) || \ make(delete-old) || make(delete-old-dirs) || \ make(delete-old-files) || make(delete-old-libs) # # check for / delete old files section # .include "ObsoleteFiles.inc" OLD_LIBS_MESSAGE="Please be sure no application still uses those libraries, \ else you can not start such an application. Consult UPDATING for more \ information regarding how to cope with the removal/revision bump of a \ specific library." .if !defined(BATCH_DELETE_OLD_FILES) RM_I=-i .else RM_I=-v .endif delete-old-files: .PHONY @echo ">>> Removing old files (only deletes safe to delete libs)" # Ask for every old file if the user really wants to remove it. # It's annoying, but better safe than sorry. # NB: We cannot pass the list of OLD_FILES as a parameter because the # argument list will get too long. Using .for/.endfor make "loops" will make # the Makefile parser segfault. 
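# For example (illustrative; defining BATCH_DELETE_OLD_FILES replaces the
# interactive 'rm -i' prompts with verbose 'rm -v', per RM_I above):
#   make -DBATCH_DELETE_OLD_FILES delete-old-files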
@exec 3<&0; \ cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_FILES -V "OLD_FILES:Musr/share/*.gz:R" | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \ rm ${RM_I} "${DESTDIR}/$${file}" <&3; \ fi; \ for ext in debug symbols; do \ if ! [ -e "${DESTDIR}/$${file}" ] && [ -f \ "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ rm ${RM_I} "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" \ <&3; \ fi; \ done; \ done # Remove catpages without corresponding manpages. @exec 3<&0; \ find ${DESTDIR}/usr/share/man/cat* ! -type d 2>/dev/null | \ sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \ while read catpage; do \ read manpage; \ if [ ! -e "$${manpage}" ]; then \ rm ${RM_I} $${catpage} <&3; \ fi; \ done @echo ">>> Old files removed" check-old-files: .PHONY @echo ">>> Checking for old files" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_FILES -V "OLD_FILES:Musr/share/*.gz:R" | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ echo "${DESTDIR}/$${file}"; \ fi; \ for ext in debug symbols; do \ if [ -f "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}"; \ fi; \ done; \ done # Check for catpages without corresponding manpages. @find ${DESTDIR}/usr/share/man/cat* ! -type d 2>/dev/null | \ sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \ while read catpage; do \ read manpage; \ if [ ! -e "$${manpage}" ]; then \ echo $${catpage}; \ fi; \ done delete-old-libs: .PHONY @echo ">>> Removing old libraries" @echo "${OLD_LIBS_MESSAGE}" | fmt @exec 3<&0; \ cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_LIBS | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \ rm ${RM_I} "${DESTDIR}/$${file}" <&3; \ fi; \ for ext in debug symbols; do \ if ! 
[ -e "${DESTDIR}/$${file}" ] && [ -f \ "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ rm ${RM_I} "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" \ <&3; \ fi; \ done; \ done @echo ">>> Old libraries removed" check-old-libs: .PHONY @echo ">>> Checking for old libraries" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_LIBS | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ echo "${DESTDIR}/$${file}"; \ fi; \ for ext in debug symbols; do \ if [ -f "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}"; \ fi; \ done; \ done delete-old-dirs: .PHONY @echo ">>> Removing old directories" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_DIRS | xargs -n1 | sort -r | \ while read dir; do \ if [ -d "${DESTDIR}/$${dir}" ]; then \ rmdir -v "${DESTDIR}/$${dir}" || true; \ elif [ -L "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ if [ -d "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ rmdir -v "${DESTDIR}${DEBUGDIR}/$${dir}" || true; \ elif [ -L "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ done @echo ">>> Old directories removed" check-old-dirs: .PHONY @echo ">>> Checking for old directories" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_DIRS | xargs -n1 | \ while read dir; do \ if [ -d "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir}"; \ elif [ -L "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ if [ -d "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir}"; \ elif [ -L "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ done delete-old: delete-old-files delete-old-dirs .PHONY @echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'." check-old: check-old-files check-old-libs check-old-dirs .PHONY @echo "To remove old files and directories run '${MAKE_CMD} delete-old'." @echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'." .endif # # showconfig - show build configuration. # showconfig: .PHONY @(${MAKE} -n -f ${.CURDIR}/sys/conf/kern.opts.mk -V dummy -dg1 UPDATE_DEPENDFILE=no NO_OBJ=yes; \ ${MAKE} -n -f ${.CURDIR}/share/mk/src.opts.mk -V dummy -dg1 UPDATE_DEPENDFILE=no NO_OBJ=yes) 2>&1 | grep ^MK_ | sort -u .if !empty(KRNLOBJDIR) && !empty(KERNCONF) DTBOUTPUTPATH= ${KRNLOBJDIR}/${KERNCONF}/ .if !defined(FDT_DTS_FILE) || empty(FDT_DTS_FILE) .if exists(${KERNCONFDIR}/${KERNCONF}) FDT_DTS_FILE!= awk 'BEGIN {FS="="} /^makeoptions[[:space:]]+FDT_DTS_FILE/ {print $$2}' \ '${KERNCONFDIR}/${KERNCONF}' ; echo .endif .endif .endif .if !defined(DTBOUTPUTPATH) || !exists(${DTBOUTPUTPATH}) DTBOUTPUTPATH= ${.CURDIR} .endif # # Build 'standalone' Device Tree Blob # builddtb: .PHONY @PATH=${TMPPATH} MACHINE=${TARGET} \ ${.CURDIR}/sys/tools/fdt/make_dtb.sh ${.CURDIR}/sys \ "${FDT_DTS_FILE}" ${DTBOUTPUTPATH} ############### # cleanworld # In the following, the first 'rm' in a series will usually remove all # files and directories. If it does not, then there are probably some # files with file flags set, so this unsets them and tries the 'rm' a # second time. 
There are situations where this target will be cleaning # some directories via more than one method, but that duplication is # needed to correctly handle all the possible situations. Removing all # files without file flags set in the first 'rm' instance saves time, # because 'chflags' will need to operate on fewer files afterwards. # # It is expected that BW_CANONICALOBJDIR == the CANONICALOBJDIR as would be # created by bsd.obj.mk, except that we don't want to .include that file # in this makefile. We don't do a cleandir walk if MK_AUTO_OBJ is yes # since it is not possible for files to land in the wrong place. # BW_CANONICALOBJDIR:=${OBJTREE}${.CURDIR} cleanworld: .PHONY .if exists(${BW_CANONICALOBJDIR}/) -rm -rf ${BW_CANONICALOBJDIR}/* -chflags -R 0 ${BW_CANONICALOBJDIR} rm -rf ${BW_CANONICALOBJDIR}/* .endif .if ${MK_AUTO_OBJ} == "no" .if ${.CURDIR} == ${.OBJDIR} || ${.CURDIR}/obj == ${.OBJDIR} # To be safe in this case, fall back to a 'make cleandir' ${_+_}@cd ${.CURDIR}; ${MAKE} cleandir .endif .endif .if ${TARGET} == ${MACHINE} && ${TARGET_ARCH} == ${MACHINE_ARCH} XDEV_CPUTYPE?=${CPUTYPE} .else XDEV_CPUTYPE?=${TARGET_CPUTYPE} .endif NOFUN= MK_COVERAGE=no -DNO_FSCHG MK_HTML=no -DNO_LINT \ MK_MAN=no MK_NLS=no MK_PROFILE=no \ MK_KERBEROS=no MK_RESCUE=no MK_TESTS=no MK_WARNS=no \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ CPUTYPE=${XDEV_CPUTYPE} XDDIR=${TARGET_ARCH}-freebsd XDTP?=/usr/${XDDIR} .if ${XDTP:N/*} .error XDTP variable should be an absolute path .endif CDBENV=MAKEOBJDIRPREFIX=${MAKEOBJDIRPREFIX}/${XDDIR} \ INSTALL="sh ${.CURDIR}/tools/install.sh" CDENV= ${CDBENV} \ TOOLS_PREFIX=${XDTP} .if ${WANT_COMPILER_TYPE} == gcc || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc) # GCC requires -isystem and -L when using a cross-compiler. --sysroot # won't set header path and -L is used to ensure the base library path # is added before the port PREFIX library path. CD2CFLAGS+= -isystem ${XDDESTDIR}/usr/include -L${XDDESTDIR}/usr/lib # GCC requires -B to find /usr/lib/crti.o when using a cross-compiler # combined with --sysroot. CD2CFLAGS+= -B${XDDESTDIR}/usr/lib # Force using libc++ for external GCC. .if ${X_COMPILER_TYPE} == gcc && ${X_COMPILER_VERSION} >= 40800 CD2CXXFLAGS+= -isystem ${XDDESTDIR}/usr/include/c++/v1 -std=c++11 \ -nostdinc++ .endif .endif CD2CFLAGS+= --sysroot=${XDDESTDIR}/ CD2ENV=${CDENV} CC="${CC} ${CD2CFLAGS}" CXX="${CXX} ${CD2CXXFLAGS} ${CD2CFLAGS}" \ CPP="${CPP} ${CD2CFLAGS}" \ MACHINE=${TARGET} MACHINE_ARCH=${TARGET_ARCH} CDTMP= ${MAKEOBJDIRPREFIX}/${XDDIR}/${.CURDIR}/tmp CDMAKE=${CDENV} PATH=${CDTMP}/usr/bin:${PATH} ${MAKE} ${NOFUN} CD2MAKE=${CD2ENV} PATH=${CDTMP}/usr/bin:${XDDESTDIR}/usr/bin:${PATH} ${MAKE} ${NOFUN} .if ${MK_META_MODE} != "no" # Don't rebuild build-tools targets during normal build. 
CD2MAKE+= BUILD_TOOLS_META=.NOMETA .endif XDDESTDIR=${DESTDIR}${XDTP} .ORDER: xdev-build xdev-install xdev-links xdev: xdev-build xdev-install .PHONY .ORDER: _xb-worldtmp _xb-bootstrap-tools _xb-build-tools _xb-cross-tools xdev-build: _xb-worldtmp _xb-bootstrap-tools _xb-build-tools _xb-cross-tools .PHONY _xb-worldtmp: .PHONY mkdir -p ${CDTMP}/usr mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${CDTMP}/usr >/dev/null _xb-bootstrap-tools: .PHONY .for _tool in \ ${_clang_tblgen} \ ${_gperf} \ ${_yacc} ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${CDMAKE} DIRPRFX=${_tool}/ obj; fi; \ ${CDMAKE} DIRPRFX=${_tool}/ all; \ ${CDMAKE} DIRPRFX=${_tool}/ DESTDIR=${CDTMP} install .endfor _xb-build-tools: .PHONY ${_+_}@cd ${.CURDIR}; \ ${CDBENV} ${MAKE} -f Makefile.inc1 ${NOFUN} build-tools XDEVDIRS= \ ${_lld} \ ${_binutils} \ ${_elftctools} \ usr.bin/ar \ ${_clang_libs} \ ${_clang} \ ${_gcc} _xb-cross-tools: .PHONY .for _tool in ${XDEVDIRS} ${_+_}@${ECHODIR} "===> xdev ${_tool} (obj,all)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${CDMAKE} DIRPRFX=${_tool}/ obj; fi; \ ${CDMAKE} DIRPRFX=${_tool}/ all .endfor _xi-mtree: .PHONY ${_+_}@${ECHODIR} "mtree populating ${XDDESTDIR}" mkdir -p ${XDDESTDIR} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.root.dist \ -p ${XDDESTDIR} >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${XDDESTDIR}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${XDDESTDIR}/usr/include >/dev/null .if defined(LIBCOMPAT) mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${XDDESTDIR}/usr >/dev/null .endif .if ${MK_TESTS} != "no" mkdir -p ${XDDESTDIR}${TESTSBASE} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${XDDESTDIR}${TESTSBASE} >/dev/null .endif .ORDER: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries xdev-install: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries .PHONY _xi-cross-tools: .PHONY @echo "_xi-cross-tools" .for _tool in ${XDEVDIRS} ${_+_}@${ECHODIR} "===> xdev ${_tool} (install)"; \ cd ${.CURDIR}/${_tool}; \ ${CDMAKE} DIRPRFX=${_tool}/ install DESTDIR=${XDDESTDIR} .endfor _xi-includes: .PHONY .if !defined(NO_OBJ) ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 _obj \ DESTDIR=${XDDESTDIR} .endif ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 includes \ DESTDIR=${XDDESTDIR} _xi-libraries: .PHONY ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 libraries \ DESTDIR=${XDDESTDIR} xdev-links: .PHONY ${_+_}cd ${XDDESTDIR}/usr/bin; \ mkdir -p ../../../../usr/bin; \ for i in *; do \ ln -sf ../../${XDTP}/usr/bin/$$i \ ../../../../usr/bin/${XDDIR}-$$i; \ ln -sf ../../${XDTP}/usr/bin/$$i \ ../../../../usr/bin/${XDDIR}${_REVISION}-$$i; \ done Index: projects/runtime-coverage/lib/libcasper/services/cap_grp/cap_grp.h =================================================================== --- projects/runtime-coverage/lib/libcasper/services/cap_grp/cap_grp.h (revision 325209) +++ projects/runtime-coverage/lib/libcasper/services/cap_grp/cap_grp.h (revision 325210) @@ -1,89 +1,89 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Pawel Jakub Dawidek under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _CAP_GRP_H_
#define _CAP_GRP_H_

#ifdef HAVE_CASPER
#define WITH_CASPER
#endif

#ifdef WITH_CASPER
struct group *cap_getgrent(cap_channel_t *chan);
struct group *cap_getgrnam(cap_channel_t *chan, const char *name);
struct group *cap_getgrgid(cap_channel_t *chan, gid_t gid);
int cap_getgrent_r(cap_channel_t *chan, struct group *grp, char *buffer,
    size_t bufsize, struct group **result);
int cap_getgrnam_r(cap_channel_t *chan, const char *name, struct group *grp,
    char *buffer, size_t bufsize, struct group **result);
int cap_getgrgid_r(cap_channel_t *chan, gid_t gid, struct group *grp,
    char *buffer, size_t bufsize, struct group **result);
int cap_setgroupent(cap_channel_t *chan, int stayopen);
int cap_setgrent(cap_channel_t *chan);
void cap_endgrent(cap_channel_t *chan);
int cap_grp_limit_cmds(cap_channel_t *chan, const char * const *cmds,
    size_t ncmds);
int cap_grp_limit_fields(cap_channel_t *chan, const char * const *fields,
    size_t nfields);
int cap_grp_limit_groups(cap_channel_t *chan, const char * const *names,
    size_t nnames, gid_t *gids, size_t ngids);
#else
#define cap_getgrent(chan) getgrent()
#define cap_getgrnam(chan, name) getgrnam(name)
#define cap_getgrgid(chan, gid) getgrgid(gid)
#define cap_setgroupent(chan, stayopen) setgroupent(stayopen)
#define cap_endgrent(chan) endgrent()
-inline int
+static inline int
cap_setgrent(cap_channel_t *chan __unused)
{
	setgrent();
	return(0);
}
#define cap_getgrent_r(chan, grp, buffer, bufsize, result) \
	getgrent_r(grp, buffer, bufsize, result)
#define cap_getgrnam_r(chan, name, grp, buffer, bufsize, result) \
	getgrnam_r(name, grp, buffer, bufsize, result)
#define cap_getgrgid_r(chan, gid, grp, buffer, bufsize, result) \
	getgrgid_r(gid, grp, buffer, bufsize, result)
#define cap_grp_limit_cmds(chan, cmds, ncmds) (0)
#define cap_grp_limit_fields(chan, fields, nfields) (0)
#define cap_grp_limit_groups(chan, names, nnames, gids, ngids) (0)
#endif

#endif /* !_CAP_GRP_H_ */
Index: projects/runtime-coverage/sbin/reboot/reboot.c
===================================================================
--- projects/runtime-coverage/sbin/reboot/reboot.c (revision 325209)
+++ projects/runtime-coverage/sbin/reboot/reboot.c (revision 325210)
@@ -1,280 +1,280 @@
/*
 * Copyright (c) 1980, 1986, 1993
 * The Regents of the University of California. All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1980, 1986, 1993\n\ The Regents of the University of California. All rights reserved.\n"; #endif /* not lint */ #ifndef lint static char sccsid[] = "@(#)reboot.c 8.1 (Berkeley) 6/5/93"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void usage(void); static u_int get_pageins(void); static int dohalt; int main(int argc, char *argv[]) { struct utmpx utx; const struct passwd *pw; int ch, howto, i, fd, lflag, nflag, qflag, sverrno, Nflag; u_int pageins; const char *user, *kernel = NULL; if (strcmp(getprogname(), "halt") == 0) { dohalt = 1; howto = RB_HALT; } else howto = 0; lflag = nflag = qflag = Nflag = 0; while ((ch = getopt(argc, argv, "cdk:lNnpqr")) != -1) switch(ch) { case 'c': howto |= RB_POWERCYCLE; break; case 'd': howto |= RB_DUMP; break; case 'k': kernel = optarg; break; case 'l': lflag = 1; break; case 'n': nflag = 1; howto |= RB_NOSYNC; break; case 'N': nflag = 1; Nflag = 1; break; case 'p': howto |= RB_POWEROFF; break; case 'q': qflag = 1; break; case 'r': howto |= RB_REROOT; break; case '?': default: usage(); } argc -= optind; argv += optind; if ((howto & (RB_DUMP | RB_HALT)) == (RB_DUMP | RB_HALT)) errx(1, "cannot dump (-d) when halting; must reboot instead"); if (Nflag && (howto & RB_NOSYNC) != 0) errx(1, "-N cannot be used with -n"); if ((howto & RB_POWEROFF) && (howto & RB_POWERCYCLE)) errx(1, "-c and -p cannot be used together"); if ((howto & RB_REROOT) != 0 && howto != RB_REROOT) errx(1, "-r cannot be used with -c, -d, -n, or -p"); if (geteuid()) { errno = EPERM; err(1, NULL); } if (qflag) { reboot(howto); err(1, NULL); } if (kernel != NULL) { fd = open("/boot/nextboot.conf", O_WRONLY | O_CREAT | O_TRUNC, 0444); if (fd > -1) { (void)write(fd, "nextboot_enable=\"YES\"\n", 22); (void)write(fd, "kernel=\"", 8L); (void)write(fd, kernel, strlen(kernel)); (void)write(fd, "\"\n", 2); close(fd); } } /* Log the reboot. 
*/ if (!lflag) { if ((user = getlogin()) == NULL) user = (pw = getpwuid(getuid())) ? pw->pw_name : "???"; if (dohalt) { openlog("halt", 0, LOG_AUTH | LOG_CONS); syslog(LOG_CRIT, "halted by %s", user); } else if (howto & RB_REROOT) { openlog("reroot", 0, LOG_AUTH | LOG_CONS); syslog(LOG_CRIT, "rerooted by %s", user); } else if (howto & RB_POWEROFF) { openlog("reboot", 0, LOG_AUTH | LOG_CONS); syslog(LOG_CRIT, "powered off by %s", user); } else if (howto & RB_POWERCYCLE) { openlog("reboot", 0, LOG_AUTH | LOG_CONS); syslog(LOG_CRIT, "power cycled by %s", user); } else { openlog("reboot", 0, LOG_AUTH | LOG_CONS); syslog(LOG_CRIT, "rebooted by %s", user); } } utx.ut_type = SHUTDOWN_TIME; gettimeofday(&utx.ut_tv, NULL); pututxline(&utx); /* * Do a sync early on, so disks start transfers while we're off * killing processes. Don't worry about writes done before the * processes die, the reboot system call syncs the disks. */ if (!nflag) sync(); /* * Ignore signals that we can get as a result of killing * parents, group leaders, etc. */ (void)signal(SIGHUP, SIG_IGN); (void)signal(SIGINT, SIG_IGN); (void)signal(SIGQUIT, SIG_IGN); (void)signal(SIGTERM, SIG_IGN); (void)signal(SIGTSTP, SIG_IGN); /* * If we're running in a pipeline, we don't want to die * after killing whatever we're writing to. */ (void)signal(SIGPIPE, SIG_IGN); /* * Only init(8) can perform rerooting. */ if (howto & RB_REROOT) { if (kill(1, SIGEMT) == -1) err(1, "SIGEMT init"); return (0); } /* Just stop init -- if we fail, we'll restart it. */ if (kill(1, SIGTSTP) == -1) err(1, "SIGTSTP init"); /* Send a SIGTERM first, a chance to save the buffers. */ if (kill(-1, SIGTERM) == -1 && errno != ESRCH) err(1, "SIGTERM processes"); /* * After the processes receive the signal, start the rest of the * buffers on their way. Wait 5 seconds between the SIGTERM and * the SIGKILL to give everybody a chance. If there is a lot of * paging activity then wait longer, up to a maximum of approx * 60 seconds. */ sleep(2); for (i = 0; i < 20; i++) { pageins = get_pageins(); if (!nflag) sync(); sleep(3); if (get_pageins() == pageins) break; } for (i = 1;; ++i) { if (kill(-1, SIGKILL) == -1) { if (errno == ESRCH) break; goto restart; } if (i > 5) { (void)fprintf(stderr, "WARNING: some process(es) wouldn't die\n"); break; } (void)sleep(2 * i); } reboot(howto); /* FALLTHROUGH */ restart: sverrno = errno; errx(1, "%s%s", kill(1, SIGHUP) == -1 ? "(can't restart init): " : "", strerror(sverrno)); /* NOTREACHED */ } static void usage(void) { (void)fprintf(stderr, dohalt ? - "usage: halt [-lNnpq] [-k kernel]\n" : - "usage: reboot [-dlNnpqr] [-k kernel]\n"); + "usage: halt [-clNnpq] [-k kernel]\n" : + "usage: reboot [-cdlNnpqr] [-k kernel]\n"); exit(1); } static u_int get_pageins(void) { u_int pageins; size_t len; len = sizeof(pageins); if (sysctlbyname("vm.stats.vm.v_swappgsin", &pageins, &len, NULL, 0) != 0) { warnx("v_swappgsin"); return (0); } return pageins; } Index: projects/runtime-coverage/sbin/shutdown/shutdown.c =================================================================== --- projects/runtime-coverage/sbin/shutdown/shutdown.c (revision 325209) +++ projects/runtime-coverage/sbin/shutdown/shutdown.c (revision 325210) @@ -1,572 +1,572 @@ /* * Copyright (c) 1988, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1988, 1990, 1993\n\ The Regents of the University of California. All rights reserved.\n"; #endif /* not lint */ #ifndef lint static char sccsid[] = "@(#)shutdown.c 8.4 (Berkeley) 4/28/95"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG #undef _PATH_NOLOGIN #define _PATH_NOLOGIN "./nologin" #endif #define H *60*60 #define M *60 #define S *1 #define NOLOG_TIME 5*60 static struct interval { int timeleft, timetowait; } tlist[] = { { 10 H, 5 H }, { 5 H, 3 H }, { 2 H, 1 H }, { 1 H, 30 M }, { 30 M, 10 M }, { 20 M, 10 M }, { 10 M, 5 M }, { 5 M, 3 M }, { 2 M, 1 M }, { 1 M, 30 S }, { 30 S, 30 S }, { 0 , 0 } }; #undef H #undef M #undef S static time_t offset, shuttime; static int docycle, dohalt, dopower, doreboot, killflg, mbuflen, oflag; static char mbuf[BUFSIZ]; static const char *nosync, *whom; static void badtime(void); static void die_you_gravy_sucking_pig_dog(void); static void finish(int); static void getoffset(char *); static void loop(void); static void nolog(void); static void timeout(int); static void timewarn(int); static void usage(const char *); extern const char **environ; int main(int argc, char **argv) { char *p, *endp; struct passwd *pw; int arglen, ch, len, readstdin; #ifndef DEBUG if (geteuid()) errx(1, "NOT super-user"); #endif nosync = NULL; readstdin = 0; /* * Test for the special case where the utility is called as * "poweroff", for which it runs 'shutdown -p now'. 
*/ if ((p = strrchr(argv[0], '/')) == NULL) p = argv[0]; else ++p; if (strcmp(p, "poweroff") == 0) { if (getopt(argc, argv, "") != -1) usage((char *)NULL); argc -= optind; argv += optind; if (argc != 0) usage((char *)NULL); dopower = 1; offset = 0; (void)time(&shuttime); goto poweroff; } while ((ch = getopt(argc, argv, "-chknopr")) != -1) switch (ch) { case '-': readstdin = 1; break; case 'c': docycle = 1; break; case 'h': dohalt = 1; break; case 'k': killflg = 1; break; case 'n': nosync = "-n"; break; case 'o': oflag = 1; break; case 'p': dopower = 1; break; case 'r': doreboot = 1; break; case '?': default: usage((char *)NULL); } argc -= optind; argv += optind; if (argc < 1) usage((char *)NULL); if (killflg + doreboot + dohalt + dopower + docycle > 1) usage("incompatible switches -c, -h, -k, -p and -r"); if (oflag && !(dohalt || dopower || doreboot || docycle)) usage("-o requires -c, -h, -p or -r"); if (nosync != NULL && !oflag) usage("-n requires -o"); getoffset(*argv++); poweroff: if (*argv) { for (p = mbuf, len = sizeof(mbuf); *argv; ++argv) { arglen = strlen(*argv); if ((len -= arglen) <= 2) break; if (p != mbuf) *p++ = ' '; memmove(p, *argv, arglen); p += arglen; } *p = '\n'; *++p = '\0'; } if (readstdin) { p = mbuf; endp = mbuf + sizeof(mbuf) - 2; for (;;) { if (!fgets(p, endp - p + 1, stdin)) break; for (; *p && p < endp; ++p); if (p == endp) { *p = '\n'; *++p = '\0'; break; } } } mbuflen = strlen(mbuf); if (offset) (void)printf("Shutdown at %.24s.\n", ctime(&shuttime)); else (void)printf("Shutdown NOW!\n"); if (!(whom = getlogin())) whom = (pw = getpwuid(getuid())) ? pw->pw_name : "???"; #ifdef DEBUG (void)putc('\n', stdout); #else (void)setpriority(PRIO_PROCESS, 0, PRIO_MIN); { int forkpid; forkpid = fork(); if (forkpid == -1) err(1, "fork"); if (forkpid) errx(0, "[pid %d]", forkpid); } setsid(); #endif openlog("shutdown", LOG_CONS, LOG_AUTH); loop(); return(0); } static void loop(void) { struct interval *tp; u_int sltime; int logged; if (offset <= NOLOG_TIME) { logged = 1; nolog(); } else logged = 0; tp = tlist; if (tp->timeleft < offset) (void)sleep((u_int)(offset - tp->timeleft)); else { while (tp->timeleft && offset < tp->timeleft) ++tp; /* * Warn now, if going to sleep more than a fifth of * the next wait time. */ if ((sltime = offset - tp->timeleft)) { if (sltime > (u_int)(tp->timetowait / 5)) timewarn(offset); (void)sleep(sltime); } } for (;; ++tp) { timewarn(tp->timeleft); if (!logged && tp->timeleft <= NOLOG_TIME) { logged = 1; nolog(); } (void)sleep((u_int)tp->timetowait); if (!tp->timeleft) break; } die_you_gravy_sucking_pig_dog(); } static jmp_buf alarmbuf; static const char *restricted_environ[] = { "PATH=" _PATH_STDPATH, NULL }; static void timewarn(int timeleft) { static int first; static char hostname[MAXHOSTNAMELEN + 1]; FILE *pf; char wcmd[MAXPATHLEN + 4]; if (!first++) (void)gethostname(hostname, sizeof(hostname)); /* undoc -n option to wall suppresses normal wall banner */ (void)snprintf(wcmd, sizeof(wcmd), "%s -n", _PATH_WALL); environ = restricted_environ; if (!(pf = popen(wcmd, "w"))) { syslog(LOG_ERR, "shutdown: can't find %s: %m", _PATH_WALL); return; } (void)fprintf(pf, "\007*** %sSystem shutdown message from %s@%s ***\007\n", timeleft ? "": "FINAL ", whom, hostname); if (timeleft > 10*60) (void)fprintf(pf, "System going down at %5.5s\n\n", ctime(&shuttime) + 11); else if (timeleft > 59) (void)fprintf(pf, "System going down in %d minute%s\n\n", timeleft / 60, (timeleft > 60) ? 
"s" : ""); else if (timeleft) (void)fprintf(pf, "System going down in %s30 seconds\n\n", (offset > 0 && offset < 30 ? "less than " : "")); else (void)fprintf(pf, "System going down IMMEDIATELY\n\n"); if (mbuflen) (void)fwrite(mbuf, sizeof(*mbuf), mbuflen, pf); /* * play some games, just in case wall doesn't come back * probably unnecessary, given that wall is careful. */ if (!setjmp(alarmbuf)) { (void)signal(SIGALRM, timeout); (void)alarm((u_int)30); (void)pclose(pf); (void)alarm((u_int)0); (void)signal(SIGALRM, SIG_DFL); } } static void timeout(int signo __unused) { longjmp(alarmbuf, 1); } static void die_you_gravy_sucking_pig_dog(void) { char *empty_environ[] = { NULL }; syslog(LOG_NOTICE, "%s by %s: %s", doreboot ? "reboot" : dohalt ? "halt" : dopower ? "power-down" : docycle ? "power-cycle" : "shutdown", whom, mbuf); (void)printf("\r\nSystem shutdown time has arrived\007\007\r\n"); if (killflg) { (void)printf("\rbut you'll have to do it yourself\r\n"); exit(0); } #ifdef DEBUG if (doreboot) (void)printf("reboot"); else if (docycle) (void)printf("power-cycle"); else if (dohalt) (void)printf("halt"); else if (dopower) (void)printf("power-down"); if (nosync != NULL) (void)printf(" no sync"); (void)printf("\nkill -HUP 1\n"); #else if (!oflag) { (void)kill(1, doreboot ? SIGINT : /* reboot */ dohalt ? SIGUSR1 : /* halt */ dopower ? SIGUSR2 : /* power-down */ docycle ? SIGWINCH : /* power-cycle */ SIGTERM); /* single-user */ } else { if (doreboot) { execle(_PATH_REBOOT, "reboot", "-l", nosync, (char *)NULL, empty_environ); syslog(LOG_ERR, "shutdown: can't exec %s: %m.", _PATH_REBOOT); warn(_PATH_REBOOT); } else if (dohalt) { execle(_PATH_HALT, "halt", "-l", nosync, (char *)NULL, empty_environ); syslog(LOG_ERR, "shutdown: can't exec %s: %m.", _PATH_HALT); warn(_PATH_HALT); } else if (dopower) { execle(_PATH_HALT, "halt", "-l", "-p", nosync, (char *)NULL, empty_environ); syslog(LOG_ERR, "shutdown: can't exec %s: %m.", _PATH_HALT); warn(_PATH_HALT); } else if (docycle) { execle(_PATH_HALT, "halt", "-l", "-c", nosync, (char *)NULL, empty_environ); syslog(LOG_ERR, "shutdown: can't exec %s: %m.", _PATH_HALT); warn(_PATH_HALT); } (void)kill(1, SIGTERM); /* to single-user */ } #endif finish(0); } #define ATOI2(p) (p[0] - '0') * 10 + (p[1] - '0'); p += 2; static void getoffset(char *timearg) { struct tm *lt; char *p; time_t now; int this_year; char *timeunit; (void)time(&now); if (!strcasecmp(timearg, "now")) { /* now */ offset = 0; shuttime = now; return; } if (*timearg == '+') { /* +minutes */ if (!isdigit(*++timearg)) badtime(); errno = 0; offset = strtol(timearg, &timeunit, 10); if (offset < 0 || offset == LONG_MAX || errno != 0) badtime(); if (timeunit[0] == '\0' || strcasecmp(timeunit, "m") == 0 || strcasecmp(timeunit, "min") == 0 || strcasecmp(timeunit, "mins") == 0) { offset *= 60; } else if (strcasecmp(timeunit, "h") == 0 || strcasecmp(timeunit, "hour") == 0 || strcasecmp(timeunit, "hours") == 0) { offset *= 60 * 60; } else if (strcasecmp(timeunit, "s") == 0 || strcasecmp(timeunit, "sec") == 0 || strcasecmp(timeunit, "secs") == 0) { offset *= 1; } else { badtime(); } shuttime = now + offset; return; } /* handle hh:mm by getting rid of the colon */ for (p = timearg; *p; ++p) if (!isascii(*p) || !isdigit(*p)) { if (*p == ':' && strlen(p) == 3) { p[0] = p[1]; p[1] = p[2]; p[2] = '\0'; } else badtime(); } unsetenv("TZ"); /* OUR timezone */ lt = localtime(&now); /* current time val */ switch(strlen(timearg)) { case 10: this_year = lt->tm_year; lt->tm_year = ATOI2(timearg); /* * check if the 
specified year is in the next century. * allow for one year of user error as many people will * enter n - 1 at the start of year n. */ if (lt->tm_year < (this_year % 100) - 1) lt->tm_year += 100; /* adjust for the year 2000 and beyond */ lt->tm_year += (this_year - (this_year % 100)); /* FALLTHROUGH */ case 8: lt->tm_mon = ATOI2(timearg); if (--lt->tm_mon < 0 || lt->tm_mon > 11) badtime(); /* FALLTHROUGH */ case 6: lt->tm_mday = ATOI2(timearg); if (lt->tm_mday < 1 || lt->tm_mday > 31) badtime(); /* FALLTHROUGH */ case 4: lt->tm_hour = ATOI2(timearg); if (lt->tm_hour < 0 || lt->tm_hour > 23) badtime(); lt->tm_min = ATOI2(timearg); if (lt->tm_min < 0 || lt->tm_min > 59) badtime(); lt->tm_sec = 0; if ((shuttime = mktime(lt)) == -1) badtime(); if ((offset = shuttime - now) < 0) errx(1, "that time is already past."); break; default: badtime(); } } #define NOMSG "\n\nNO LOGINS: System going down at " static void nolog(void) { int logfd; char *ct; (void)unlink(_PATH_NOLOGIN); /* in case linked to another file */ (void)signal(SIGINT, finish); (void)signal(SIGHUP, finish); (void)signal(SIGQUIT, finish); (void)signal(SIGTERM, finish); if ((logfd = open(_PATH_NOLOGIN, O_WRONLY|O_CREAT|O_TRUNC, 0664)) >= 0) { (void)write(logfd, NOMSG, sizeof(NOMSG) - 1); ct = ctime(&shuttime); (void)write(logfd, ct + 11, 5); (void)write(logfd, "\n\n", 2); (void)write(logfd, mbuf, strlen(mbuf)); (void)close(logfd); } } static void finish(int signo __unused) { if (!killflg) (void)unlink(_PATH_NOLOGIN); exit(0); } static void badtime(void) { errx(1, "bad time format"); } static void usage(const char *cp) { if (cp != NULL) warnx("%s", cp); (void)fprintf(stderr, - "usage: shutdown [-] [-h | -p | -r | -k] [-o [-n]] time [warning-message ...]\n" + "usage: shutdown [-] [-c | -h | -p | -r | -k] [-o [-n]] time [warning-message ...]\n" " poweroff\n"); exit(1); } Index: projects/runtime-coverage/sys/contrib/ncsw/Peripherals/FM/Port/fm_port.c =================================================================== --- projects/runtime-coverage/sys/contrib/ncsw/Peripherals/FM/Port/fm_port.c (revision 325209) +++ projects/runtime-coverage/sys/contrib/ncsw/Peripherals/FM/Port/fm_port.c (revision 325210) @@ -1,6438 +1,6438 @@ /* * Copyright 2008-2012 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /****************************************************************************** @File fm_port.c @Description FM driver routines implementation. *//***************************************************************************/ #include "error_ext.h" #include "std_ext.h" #include "string_ext.h" #include "sprint_ext.h" #include "debug_ext.h" #include "fm_muram_ext.h" #include "fman_common.h" #include "fm_port.h" #include "fm_port_dsar.h" #include "common/general.h" /****************************************/ /* static functions */ /****************************************/ static t_Error FmPortConfigAutoResForDeepSleepSupport1(t_FmPort *p_FmPort); static t_Error CheckInitParameters(t_FmPort *p_FmPort) { t_FmPortDriverParam *p_Params = p_FmPort->p_FmPortDriverParam; struct fman_port_cfg *p_DfltConfig = &p_Params->dfltCfg; t_Error ans = E_OK; uint32_t unusedMask; if (p_FmPort->imEn) { if (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) if (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth > 2) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fifoDeqPipelineDepth for IM 10G can't be larger than 2")); if ((ans = FmPortImCheckInitParameters(p_FmPort)) != E_OK) return ERROR_CODE(ans); } else { /****************************************/ /* Rx only */ /****************************************/ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) { /* external buffer pools */ if (!p_Params->extBufPools.numOfPoolsUsed) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("extBufPools.numOfPoolsUsed=0. 
At least one buffer pool must be defined")); if (FmSpCheckBufPoolsParams(&p_Params->extBufPools, p_Params->p_BackupBmPools, &p_Params->bufPoolDepletion) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); /* Check that part of IC that needs copying is small enough to enter start margin */ if (p_Params->intContext.size && (p_Params->intContext.size + p_Params->intContext.extBufOffset > p_Params->bufMargins.startMargins)) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.size is larger than start margins")); if ((p_Params->liodnOffset != (uint16_t)DPAA_LIODN_DONT_OVERRIDE) && (p_Params->liodnOffset & ~FM_LIODN_OFFSET_MASK)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1)); #ifdef FM_NO_BACKUP_POOLS if ((p_FmPort->fmRevInfo.majorRev != 4) && (p_FmPort->fmRevInfo.majorRev < 6)) if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools) RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("BackupBmPools")); #endif /* FM_NO_BACKUP_POOLS */ } /****************************************/ /* Non Rx ports */ /****************************************/ else { if (p_Params->deqSubPortal >= FM_MAX_NUM_OF_SUB_PORTALS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("deqSubPortal has to be in the range of 0 - %d", FM_MAX_NUM_OF_SUB_PORTALS)); /* to protect HW internal-context from overwrite */ if ((p_Params->intContext.size) && (p_Params->intContext.intContextOffset < MIN_TX_INT_OFFSET)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("non-Rx intContext.intContextOffset can't be smaller than %d", MIN_TX_INT_OFFSET)); if ((p_FmPort->portType == e_FM_PORT_TYPE_TX) || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) /* in O/H DEFAULT_notSupported indicates that it is not supported and should not be checked */ || (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth != DEFAULT_notSupported)) { /* Check that not larger than 8 */ if ((!p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth) || (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth > MAX_FIFO_PIPELINE_DEPTH)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fifoDeqPipelineDepth can't be larger than %d", MAX_FIFO_PIPELINE_DEPTH)); } } /****************************************/ /* Rx Or Offline Parsing */ /****************************************/ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) { if (!p_Params->dfltFqid) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dfltFqid must be between 1 and 2^24-1")); #if defined(FM_CAPWAP_SUPPORT) && defined(FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004) if (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace % 16) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufferPrefixContent.manipExtraSpace has to be divisible by 16")); #endif /* defined(FM_CAPWAP_SUPPORT) && ...
*/ } /****************************************/ /* All ports */ /****************************************/ /* common BMI registers values */ /* Check that Queue Id is not larger than 2^24, and is not 0 */ if ((p_Params->errFqid & ~0x00FFFFFF) || !p_Params->errFqid) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("errFqid must be between 1 and 2^24-1")); if (p_Params->dfltFqid & ~0x00FFFFFF) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dfltFqid must be between 1 and 2^24-1")); } /****************************************/ /* Rx only */ /****************************************/ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) { if (p_DfltConfig->rx_pri_elevation % BMI_FIFO_UNITS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("rxFifoPriElevationLevel has to be divisible by %d", BMI_FIFO_UNITS)); if ((p_DfltConfig->rx_pri_elevation < BMI_FIFO_UNITS) || (p_DfltConfig->rx_pri_elevation > MAX_PORT_FIFO_SIZE)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("rxFifoPriElevationLevel has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE)); if (p_DfltConfig->rx_fifo_thr % BMI_FIFO_UNITS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("rxFifoThreshold has to be divisible by %d", BMI_FIFO_UNITS)); if ((p_DfltConfig->rx_fifo_thr < BMI_FIFO_UNITS) || (p_DfltConfig->rx_fifo_thr > MAX_PORT_FIFO_SIZE)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("rxFifoThreshold has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE)); /* Check that not larger than 16 */ if (p_DfltConfig->rx_cut_end_bytes > FRAME_END_DATA_SIZE) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("cutBytesFromEnd can't be larger than %d", FRAME_END_DATA_SIZE)); if (FmSpCheckBufMargins(&p_Params->bufMargins) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); /* extra FIFO size (allowed only to Rx ports) */ if (p_Params->setSizeOfFifo && (p_FmPort->fifoBufs.extra % BMI_FIFO_UNITS)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fifoBufs.extra has to be divisible by %d", BMI_FIFO_UNITS)); if (p_Params->bufPoolDepletion.poolsGrpModeEnable && !p_Params->bufPoolDepletion.numOfPools) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPools can not be 0 when poolsGrpModeEnable=TRUE")); #ifdef FM_CSI_CFED_LIMIT if (p_FmPort->fmRevInfo.majorRev == 4) { /* Check that not larger than 16 */ if (p_DfltConfig->rx_cut_end_bytes + p_DfltConfig->checksum_bytes_ignore > FRAME_END_DATA_SIZE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("cheksumLastBytesIgnore + cutBytesFromEnd can't be larger than %d", FRAME_END_DATA_SIZE)); } #endif /* FM_CSI_CFED_LIMIT */ } /****************************************/ /* Non Rx ports */ /****************************************/ /* extra FIFO size (allowed only to Rx ports) */ else if (p_FmPort->fifoBufs.extra) RETURN_ERROR(MAJOR, E_INVALID_VALUE, (" No fifoBufs.extra for non Rx ports")); /****************************************/ /* Tx only */ /****************************************/ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX) || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)) { if (p_DfltConfig->tx_fifo_min_level % BMI_FIFO_UNITS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("txFifoMinFillLevel has to be divisible by %d", BMI_FIFO_UNITS)); if (p_DfltConfig->tx_fifo_min_level > (MAX_PORT_FIFO_SIZE - 256)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("txFifoMinFillLevel has to be in the range of 0 - %d", (MAX_PORT_FIFO_SIZE - 256))); if (p_DfltConfig->tx_fifo_low_comf_level % BMI_FIFO_UNITS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("txFifoLowComfLevel has to be divisible by %d", BMI_FIFO_UNITS)); if ((p_DfltConfig->tx_fifo_low_comf_level < 
BMI_FIFO_UNITS) || (p_DfltConfig->tx_fifo_low_comf_level > MAX_PORT_FIFO_SIZE)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("txFifoLowComfLevel has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE)); if (p_FmPort->portType == e_FM_PORT_TYPE_TX) if (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth > 2) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fifoDeqPipelineDepth for 1G can't be larger than 2")); } /****************************************/ /* Non Tx Ports */ /****************************************/ /* If discard override was selected , no frames may be discarded. */ else if (p_DfltConfig->discard_override && p_Params->errorsToDiscard) RETURN_ERROR( MAJOR, E_CONFLICT, ("errorsToDiscard is not empty, but frmDiscardOverride selected (all discarded frames to be enqueued to error queue).")); /****************************************/ /* Rx and Offline parsing */ /****************************************/ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) { if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) unusedMask = BMI_STATUS_OP_MASK_UNUSED; else unusedMask = BMI_STATUS_RX_MASK_UNUSED; /* Check that no common bits with BMI_STATUS_MASK_UNUSED */ if (p_Params->errorsToDiscard & unusedMask) RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("errorsToDiscard contains undefined bits")); } /****************************************/ /* Offline Ports */ /****************************************/ #ifdef FM_OP_OPEN_DMA_MIN_LIMIT if ((p_FmPort->fmRevInfo.majorRev >= 6) && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) && p_Params->setNumOfOpenDmas && (p_FmPort->openDmas.num < MIN_NUM_OF_OP_DMAS)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For Offline port, openDmas.num can't be smaller than %d", MIN_NUM_OF_OP_DMAS)); #endif /* FM_OP_OPEN_DMA_MIN_LIMIT */ /****************************************/ /* Offline & HC Ports */ /****************************************/ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) { #ifndef FM_FRAME_END_PARAMS_FOR_OP if ((p_FmPort->fmRevInfo.majorRev < 6) && (p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore != DEFAULT_notSupported)) /* this is an indication that user called config for this mode which is not supported in this integration */ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("cheksumLastBytesIgnore is available for Rx & Tx ports only")); #endif /* !FM_FRAME_END_PARAMS_FOR_OP */ #ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP if ((!((p_FmPort->fmRevInfo.majorRev == 4) || (p_FmPort->fmRevInfo.majorRev >= 6))) && (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth != DEFAULT_notSupported)) /* this is an indication that user called config for this mode which is not supported in this integration */ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("fifoDeqPipelineDepth is available for Tx ports only")); #endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */ } /****************************************/ /* All ports */ /****************************************/ /* Check that not larger than 16 */ if ((p_Params->cheksumLastBytesIgnore > FRAME_END_DATA_SIZE) && ((p_Params->cheksumLastBytesIgnore != DEFAULT_notSupported))) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("cheksumLastBytesIgnore can't be larger than %d", FRAME_END_DATA_SIZE)); if (FmSpCheckIntContextParams(&p_Params->intContext) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); /* common BMI registers values */ if 
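/* Resource checks below: tasks, open DMAs and FIFO size, where explicitly set, must be non-zero and within their respective MAX_* bounds. */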
(p_Params->setNumOfTasks && ((!p_FmPort->tasks.num) || (p_FmPort->tasks.num > MAX_NUM_OF_TASKS))) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("tasks.num can't be larger than %d", MAX_NUM_OF_TASKS)); if (p_Params->setNumOfTasks && (p_FmPort->tasks.extra > MAX_NUM_OF_EXTRA_TASKS)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("tasks.extra can't be larger than %d", MAX_NUM_OF_EXTRA_TASKS)); if (p_Params->setNumOfOpenDmas && ((!p_FmPort->openDmas.num) || (p_FmPort->openDmas.num > MAX_NUM_OF_DMAS))) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("openDmas.num can't be larger than %d", MAX_NUM_OF_DMAS)); if (p_Params->setNumOfOpenDmas && (p_FmPort->openDmas.extra > MAX_NUM_OF_EXTRA_DMAS)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("openDmas.extra can't be larger than %d", MAX_NUM_OF_EXTRA_DMAS)); if (p_Params->setSizeOfFifo && (!p_FmPort->fifoBufs.num || (p_FmPort->fifoBufs.num > MAX_PORT_FIFO_SIZE))) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fifoBufs.num has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE)); if (p_Params->setSizeOfFifo && (p_FmPort->fifoBufs.num % BMI_FIFO_UNITS)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fifoBufs.num has to be divisible by %d", BMI_FIFO_UNITS)); #ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT if (p_FmPort->fmRevInfo.majorRev == 4) if (p_FmPort->p_FmPortDriverParam->deqPrefetchOption != DEFAULT_notSupported) /* this is an indication that user called config for this mode which is not supported in this integration */ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("deqPrefetchOption")); #endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */ return E_OK; } static t_Error VerifySizeOfFifo(t_FmPort *p_FmPort) { uint32_t minFifoSizeRequired = 0, optFifoSizeForB2B = 0; /*************************/ /* TX PORTS */ /*************************/ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX) || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)) { minFifoSizeRequired = (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS) + (3 * BMI_FIFO_UNITS)); if (!p_FmPort->imEn) minFifoSizeRequired += p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth * BMI_FIFO_UNITS; optFifoSizeForB2B = minFifoSizeRequired; /* Add some margin for back-to-back capability to improve performance, allows the hardware to pipeline new frame dma while the previous frame not yet transmitted. 
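As a worked illustration (an example, assuming BMI_FIFO_UNITS is 256, which the "range of 256" messages above imply, and a deq pipeline depth of 2): with maxFrameLength = 1518, ROUND_UP gives 1536, the base margin of 3 units adds 768, and the pipeline depth on a non-IM port adds 512, so minFifoSizeRequired = 2816; the extra 2 units (1G) or 3 units (10G) below then raise optFifoSizeForB2B to 3328 or 3584.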
*/ if (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) optFifoSizeForB2B += 3 * BMI_FIFO_UNITS; else optFifoSizeForB2B += 2 * BMI_FIFO_UNITS; } /*************************/ /* RX IM PORTS */ /*************************/ else if (((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) && p_FmPort->imEn) { optFifoSizeForB2B = minFifoSizeRequired = (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS) + (4 * BMI_FIFO_UNITS)); } /*************************/ /* RX non-IM PORTS */ /*************************/ else if (((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) && !p_FmPort->imEn) { if (p_FmPort->fmRevInfo.majorRev == 4) { if (p_FmPort->rxPoolsParams.numOfPools == 1) minFifoSizeRequired = 8 * BMI_FIFO_UNITS; else minFifoSizeRequired = (uint32_t)(ROUND_UP(p_FmPort->rxPoolsParams.secondLargestBufSize, BMI_FIFO_UNITS) + (7 * BMI_FIFO_UNITS)); } else { #if (DPAA_VERSION >= 11) minFifoSizeRequired = (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS) + (5 * BMI_FIFO_UNITS)); /* 4 according to spec + 1 for FOF>0 */ #else minFifoSizeRequired = (uint32_t) (ROUND_UP(MIN(p_FmPort->maxFrameLength, p_FmPort->rxPoolsParams.largestBufSize), BMI_FIFO_UNITS) + (7*BMI_FIFO_UNITS)); #endif /* (DPAA_VERSION >= 11) */ } optFifoSizeForB2B = minFifoSizeRequired; /* Add some margin for back-to-back capability to improve performance, allows the hardware to pipeline new frame dma while the previous frame not yet transmitted. */ if (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) optFifoSizeForB2B += 8 * BMI_FIFO_UNITS; else optFifoSizeForB2B += 3 * BMI_FIFO_UNITS; } /* For O/H ports, check fifo size and update if necessary */ else if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) { #if (DPAA_VERSION >= 11) optFifoSizeForB2B = minFifoSizeRequired = (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS) + ((p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth + 5) * BMI_FIFO_UNITS)); /* 4 according to spec + 1 for FOF>0 */ #else optFifoSizeForB2B = minFifoSizeRequired = (uint32_t)((p_FmPort->tasks.num + 2) * BMI_FIFO_UNITS); #endif /* (DPAA_VERSION >= 11) */ } ASSERT_COND(minFifoSizeRequired > 0); ASSERT_COND(optFifoSizeForB2B >= minFifoSizeRequired); /* Verify the size */ if (p_FmPort->fifoBufs.num < minFifoSizeRequired) DBG(INFO, ("FIFO size is %d and should be enlarged to %d bytes",p_FmPort->fifoBufs.num, minFifoSizeRequired)); else if (p_FmPort->fifoBufs.num < optFifoSizeForB2B) DBG(INFO, ("For back-to-back frames processing, FIFO size is %d and needs to enlarge to %d bytes", p_FmPort->fifoBufs.num, optFifoSizeForB2B)); return E_OK; } static void FmPortDriverParamFree(t_FmPort *p_FmPort) { if (p_FmPort->p_FmPortDriverParam) { XX_Free(p_FmPort->p_FmPortDriverParam); p_FmPort->p_FmPortDriverParam = NULL; } } static t_Error SetExtBufferPools(t_FmPort *p_FmPort) { t_FmExtPools *p_ExtBufPools = &p_FmPort->p_FmPortDriverParam->extBufPools; t_FmBufPoolDepletion *p_BufPoolDepletion = &p_FmPort->p_FmPortDriverParam->bufPoolDepletion; uint8_t orderedArray[FM_PORT_MAX_NUM_OF_EXT_POOLS]; uint16_t sizesArray[BM_MAX_NUM_OF_POOLS]; int i = 0, j = 0, err; struct fman_port_bpools bpools; memset(&orderedArray, 0, sizeof(uint8_t) * FM_PORT_MAX_NUM_OF_EXT_POOLS); memset(&sizesArray, 0, sizeof(uint16_t) * BM_MAX_NUM_OF_POOLS); memcpy(&p_FmPort->extBufPools, p_ExtBufPools, sizeof(t_FmExtPools)); FmSpSetBufPoolsInAscOrderOfBufSizes(p_ExtBufPools, 
orderedArray, sizesArray); /* Prepare flibs bpools structure */ memset(&bpools, 0, sizeof(struct fman_port_bpools)); bpools.count = p_ExtBufPools->numOfPoolsUsed; bpools.counters_enable = TRUE; for (i = 0; i < p_ExtBufPools->numOfPoolsUsed; i++) { bpools.bpool[i].bpid = orderedArray[i]; bpools.bpool[i].size = sizesArray[orderedArray[i]]; /* functionality available only for some derivatives (limited by config) */ if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools) for (j = 0; j < p_FmPort->p_FmPortDriverParam->p_BackupBmPools->numOfBackupPools; j++) if (orderedArray[i] == p_FmPort->p_FmPortDriverParam->p_BackupBmPools->poolIds[j]) { bpools.bpool[i].is_backup = TRUE; break; } } /* save pools parameters for later use */ p_FmPort->rxPoolsParams.numOfPools = p_ExtBufPools->numOfPoolsUsed; p_FmPort->rxPoolsParams.largestBufSize = sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed - 1]]; p_FmPort->rxPoolsParams.secondLargestBufSize = sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed - 2]]; /* FMBM_RMPD reg. - pool depletion */ if (p_BufPoolDepletion->poolsGrpModeEnable) { bpools.grp_bp_depleted_num = p_BufPoolDepletion->numOfPools; for (i = 0; i < BM_MAX_NUM_OF_POOLS; i++) { if (p_BufPoolDepletion->poolsToConsider[i]) { for (j = 0; j < p_ExtBufPools->numOfPoolsUsed; j++) { if (i == orderedArray[j]) { bpools.bpool[j].grp_bp_depleted = TRUE; break; } } } } } if (p_BufPoolDepletion->singlePoolModeEnable) { for (i = 0; i < BM_MAX_NUM_OF_POOLS; i++) { if (p_BufPoolDepletion->poolsToConsiderForSingleMode[i]) { for (j = 0; j < p_ExtBufPools->numOfPoolsUsed; j++) { if (i == orderedArray[j]) { bpools.bpool[j].single_bp_depleted = TRUE; break; } } } } } #if (DPAA_VERSION >= 11) /* fill QbbPEV */ if (p_BufPoolDepletion->poolsGrpModeEnable || p_BufPoolDepletion->singlePoolModeEnable) { for (i = 0; i < FM_MAX_NUM_OF_PFC_PRIORITIES; i++) { if (p_BufPoolDepletion->pfcPrioritiesEn[i] == TRUE) { bpools.bpool[i].pfc_priorities_en = TRUE; } } } #endif /* (DPAA_VERSION >= 11) */ /* Issue flibs function */ err = fman_port_set_bpools(&p_FmPort->port, &bpools); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_bpools")); if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools) XX_Free(p_FmPort->p_FmPortDriverParam->p_BackupBmPools); return E_OK; } static t_Error ClearPerfCnts(t_FmPort *p_FmPort) { if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL, 0); FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL, 0); FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL, 0); FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL, 0); return E_OK; } static t_Error InitLowLevelDriver(t_FmPort *p_FmPort) { t_FmPortDriverParam *p_DriverParams = p_FmPort->p_FmPortDriverParam; struct fman_port_params portParams; uint32_t tmpVal; t_Error err; /* Set up flibs parameters and issue init function */ memset(&portParams, 0, sizeof(struct fman_port_params)); portParams.discard_mask = p_DriverParams->errorsToDiscard; portParams.dflt_fqid = p_DriverParams->dfltFqid; portParams.err_fqid = p_DriverParams->errFqid; portParams.deq_sp = p_DriverParams->deqSubPortal; portParams.dont_release_buf = p_DriverParams->dontReleaseBuf; switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): portParams.err_mask = (RX_ERRS_TO_ENQ & ~portParams.discard_mask); if (!p_FmPort->imEn) { if (p_DriverParams->forwardReuseIntContext) p_DriverParams->dfltCfg.rx_fd_bits = (uint8_t)(BMI_PORT_RFNE_FRWD_RPD >> 24); } break; case 
(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): portParams.err_mask = (OP_ERRS_TO_ENQ & ~portParams.discard_mask); break; default: break; } tmpVal = (uint32_t)( (p_FmPort->internalBufferOffset % OFFSET_UNITS) ? (p_FmPort->internalBufferOffset / OFFSET_UNITS + 1) : (p_FmPort->internalBufferOffset / OFFSET_UNITS)); p_FmPort->internalBufferOffset = (uint8_t)(tmpVal * OFFSET_UNITS); p_DriverParams->dfltCfg.int_buf_start_margin = p_FmPort->internalBufferOffset; p_DriverParams->dfltCfg.ext_buf_start_margin = p_DriverParams->bufMargins.startMargins; p_DriverParams->dfltCfg.ext_buf_end_margin = p_DriverParams->bufMargins.endMargins; p_DriverParams->dfltCfg.ic_ext_offset = p_DriverParams->intContext.extBufOffset; p_DriverParams->dfltCfg.ic_int_offset = p_DriverParams->intContext.intContextOffset; p_DriverParams->dfltCfg.ic_size = p_DriverParams->intContext.size; p_DriverParams->dfltCfg.stats_counters_enable = TRUE; p_DriverParams->dfltCfg.perf_counters_enable = TRUE; p_DriverParams->dfltCfg.queue_counters_enable = TRUE; p_DriverParams->dfltCfg.perf_cnt_params.task_val = (uint8_t)p_FmPort->tasks.num; if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING || p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) p_DriverParams->dfltCfg.perf_cnt_params.queue_val = 0; else p_DriverParams->dfltCfg.perf_cnt_params.queue_val = 1; p_DriverParams->dfltCfg.perf_cnt_params.dma_val = (uint8_t)p_FmPort->openDmas.num; p_DriverParams->dfltCfg.perf_cnt_params.fifo_val = p_FmPort->fifoBufs.num; if (0 != fman_port_init(&p_FmPort->port, &p_DriverParams->dfltCfg, &portParams)) RETURN_ERROR(MAJOR, E_NO_DEVICE, ("fman_port_init")); if (p_FmPort->imEn && ((err = FmPortImInit(p_FmPort)) != E_OK)) RETURN_ERROR(MAJOR, err, NO_MSG); else { /* from QMIInit */ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) { if (p_DriverParams->deqPrefetchOption == e_FM_PORT_DEQ_NO_PREFETCH) FmSetPortPreFetchConfiguration(p_FmPort->h_Fm, p_FmPort->portId, FALSE); else FmSetPortPreFetchConfiguration(p_FmPort->h_Fm, p_FmPort->portId, TRUE); } } /* The code below is a trick so the FM will neither release the buffer to BM nor try to enqueue the frame to QM */ if (((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_TX)) && (!p_FmPort->imEn)) { if (!p_DriverParams->dfltFqid && p_DriverParams->dontReleaseBuf) { /* override fmbm_tcfqid 0 with a false non-0 value. This will force FM to * act according to tfene.
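(The 0xFFFFFF written below is just such a dummy value, the largest 24-bit FQID, matching the 1..2^24-1 FQID range enforced in CheckInitParameters.)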
Otherwise, if fmbm_tcfqid is 0 the FM will release * buffers to BM regardless of fmbm_tfene */ WRITE_UINT32(p_FmPort->port.bmi_regs->tx.fmbm_tcfqid, 0xFFFFFF); WRITE_UINT32(p_FmPort->port.bmi_regs->tx.fmbm_tfene, NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE); } } return E_OK; } static bool CheckRxBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter) { UNUSED(p_FmPort); switch (counter) { case (e_FM_PORT_COUNTERS_CYCLE): case (e_FM_PORT_COUNTERS_TASK_UTIL): case (e_FM_PORT_COUNTERS_QUEUE_UTIL): case (e_FM_PORT_COUNTERS_DMA_UTIL): case (e_FM_PORT_COUNTERS_FIFO_UTIL): case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION): case (e_FM_PORT_COUNTERS_FRAME): case (e_FM_PORT_COUNTERS_DISCARD_FRAME): case (e_FM_PORT_COUNTERS_RX_BAD_FRAME): case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME): case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME): case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR): case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD): case (e_FM_PORT_COUNTERS_DEALLOC_BUF): case (e_FM_PORT_COUNTERS_PREPARE_TO_ENQUEUE_COUNTER): return TRUE; default: return FALSE; } } static bool CheckTxBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter) { UNUSED(p_FmPort); switch (counter) { case (e_FM_PORT_COUNTERS_CYCLE): case (e_FM_PORT_COUNTERS_TASK_UTIL): case (e_FM_PORT_COUNTERS_QUEUE_UTIL): case (e_FM_PORT_COUNTERS_DMA_UTIL): case (e_FM_PORT_COUNTERS_FIFO_UTIL): case (e_FM_PORT_COUNTERS_FRAME): case (e_FM_PORT_COUNTERS_DISCARD_FRAME): case (e_FM_PORT_COUNTERS_LENGTH_ERR): case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT): case (e_FM_PORT_COUNTERS_DEALLOC_BUF): return TRUE; default: return FALSE; } } static bool CheckOhBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter) { switch (counter) { case (e_FM_PORT_COUNTERS_CYCLE): case (e_FM_PORT_COUNTERS_TASK_UTIL): case (e_FM_PORT_COUNTERS_DMA_UTIL): case (e_FM_PORT_COUNTERS_FIFO_UTIL): case (e_FM_PORT_COUNTERS_FRAME): case (e_FM_PORT_COUNTERS_DISCARD_FRAME): case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR): case (e_FM_PORT_COUNTERS_WRED_DISCARD): case (e_FM_PORT_COUNTERS_LENGTH_ERR): case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT): case (e_FM_PORT_COUNTERS_DEALLOC_BUF): return TRUE; case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME): if (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) return FALSE; else return TRUE; default: return FALSE; } } static t_Error BmiPortCheckAndGetCounterType( t_FmPort *p_FmPort, e_FmPortCounters counter, enum fman_port_stats_counters *p_StatsType, enum fman_port_perf_counters *p_PerfType, bool *p_IsStats) { volatile uint32_t *p_Reg; bool isValid; switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_Reg = &p_FmPort->port.bmi_regs->rx.fmbm_rstc; isValid = CheckRxBmiCounter(p_FmPort, counter); break; case (e_FM_PORT_TYPE_TX_10G): case (e_FM_PORT_TYPE_TX): p_Reg = &p_FmPort->port.bmi_regs->tx.fmbm_tstc; isValid = CheckTxBmiCounter(p_FmPort, counter); break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): case (e_FM_PORT_TYPE_OH_HOST_COMMAND): p_Reg = &p_FmPort->port.bmi_regs->oh.fmbm_ostc; isValid = CheckOhBmiCounter(p_FmPort, counter); break; default: RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported port type")); } if (!isValid) RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for this port type")); /* check that counters are enabled */ switch (counter) { case (e_FM_PORT_COUNTERS_CYCLE): case (e_FM_PORT_COUNTERS_TASK_UTIL): case (e_FM_PORT_COUNTERS_QUEUE_UTIL): case (e_FM_PORT_COUNTERS_DMA_UTIL): case (e_FM_PORT_COUNTERS_FIFO_UTIL): case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION): /* performance counters - may be read when 
disabled */ *p_IsStats = FALSE; break; case (e_FM_PORT_COUNTERS_FRAME): case (e_FM_PORT_COUNTERS_DISCARD_FRAME): case (e_FM_PORT_COUNTERS_DEALLOC_BUF): case (e_FM_PORT_COUNTERS_RX_BAD_FRAME): case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME): case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME): case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR): case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD): case (e_FM_PORT_COUNTERS_LENGTH_ERR): case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT): case (e_FM_PORT_COUNTERS_WRED_DISCARD): *p_IsStats = TRUE; if (!(GET_UINT32(*p_Reg) & BMI_COUNTERS_EN)) RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); break; default: break; } /* Set counter */ switch (counter) { case (e_FM_PORT_COUNTERS_CYCLE): *p_PerfType = E_FMAN_PORT_PERF_CNT_CYCLE; break; case (e_FM_PORT_COUNTERS_TASK_UTIL): *p_PerfType = E_FMAN_PORT_PERF_CNT_TASK_UTIL; break; case (e_FM_PORT_COUNTERS_QUEUE_UTIL): *p_PerfType = E_FMAN_PORT_PERF_CNT_QUEUE_UTIL; break; case (e_FM_PORT_COUNTERS_DMA_UTIL): *p_PerfType = E_FMAN_PORT_PERF_CNT_DMA_UTIL; break; case (e_FM_PORT_COUNTERS_FIFO_UTIL): *p_PerfType = E_FMAN_PORT_PERF_CNT_FIFO_UTIL; break; case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION): *p_PerfType = E_FMAN_PORT_PERF_CNT_RX_PAUSE; break; case (e_FM_PORT_COUNTERS_FRAME): *p_StatsType = E_FMAN_PORT_STATS_CNT_FRAME; break; case (e_FM_PORT_COUNTERS_DISCARD_FRAME): *p_StatsType = E_FMAN_PORT_STATS_CNT_DISCARD; break; case (e_FM_PORT_COUNTERS_DEALLOC_BUF): *p_StatsType = E_FMAN_PORT_STATS_CNT_DEALLOC_BUF; break; case (e_FM_PORT_COUNTERS_RX_BAD_FRAME): *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_BAD_FRAME; break; case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME): *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_LARGE_FRAME; break; case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD): *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_OUT_OF_BUF; break; case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME): *p_StatsType = E_FMAN_PORT_STATS_CNT_FILTERED_FRAME; break; case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR): *p_StatsType = E_FMAN_PORT_STATS_CNT_DMA_ERR; break; case (e_FM_PORT_COUNTERS_WRED_DISCARD): *p_StatsType = E_FMAN_PORT_STATS_CNT_WRED_DISCARD; break; case (e_FM_PORT_COUNTERS_LENGTH_ERR): *p_StatsType = E_FMAN_PORT_STATS_CNT_LEN_ERR; break; case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT): *p_StatsType = E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT; break; default: break; } return E_OK; } static t_Error AdditionalPrsParams(t_FmPort *p_FmPort, t_FmPcdPrsAdditionalHdrParams *p_HdrParams, uint32_t *p_SoftSeqAttachReg) { uint8_t hdrNum, Ipv4HdrNum; u_FmPcdHdrPrsOpts *p_prsOpts; uint32_t tmpReg = *p_SoftSeqAttachReg, tmpPrsOffset; if (IS_PRIVATE_HEADER(p_HdrParams->hdr) || IS_SPECIAL_HEADER(p_HdrParams->hdr)) RETURN_ERROR( MAJOR, E_NOT_SUPPORTED, ("No additional parameters for private or special headers.")); if (p_HdrParams->errDisable) tmpReg |= PRS_HDR_ERROR_DIS; /* Set parser options */ if (p_HdrParams->usePrsOpts) { p_prsOpts = &p_HdrParams->prsOpts; switch (p_HdrParams->hdr) { case (HEADER_TYPE_MPLS): if (p_prsOpts->mplsPrsOptions.labelInterpretationEnable) tmpReg |= PRS_HDR_MPLS_LBL_INTER_EN; hdrNum = GetPrsHdrNum(p_prsOpts->mplsPrsOptions.nextParse); if (hdrNum == ILLEGAL_HDR_NUM) RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); Ipv4HdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4); if (hdrNum < Ipv4HdrNum) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Header must be equal or higher than IPv4")); tmpReg |= ((uint32_t)hdrNum * PRS_HDR_ENTRY_SIZE) << PRS_HDR_MPLS_NEXT_HDR_SHIFT; break; case (HEADER_TYPE_PPPoE): if (p_prsOpts->pppoePrsOptions.enableMTUCheck) tmpReg |= 
PRS_HDR_PPPOE_MTU_CHECK_EN; break; case (HEADER_TYPE_IPv6): if (p_prsOpts->ipv6PrsOptions.routingHdrEnable) tmpReg |= PRS_HDR_IPV6_ROUTE_HDR_EN; break; case (HEADER_TYPE_TCP): if (p_prsOpts->tcpPrsOptions.padIgnoreChecksum) tmpReg |= PRS_HDR_TCP_PAD_REMOVAL; else tmpReg &= ~PRS_HDR_TCP_PAD_REMOVAL; break; case (HEADER_TYPE_UDP): if (p_prsOpts->udpPrsOptions.padIgnoreChecksum) tmpReg |= PRS_HDR_UDP_PAD_REMOVAL; else tmpReg &= ~PRS_HDR_UDP_PAD_REMOVAL; break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid header")); } } /* set software parsing (the address is divided by 2 since the parser uses 2-byte access). */ if (p_HdrParams->swPrsEnable) { tmpPrsOffset = FmPcdGetSwPrsOffset(p_FmPort->h_FmPcd, p_HdrParams->hdr, p_HdrParams->indexPerHdr); if (tmpPrsOffset == ILLEGAL_BASE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); tmpReg |= (PRS_HDR_SW_PRS_EN | tmpPrsOffset); } *p_SoftSeqAttachReg = tmpReg; return E_OK; } static uint32_t GetPortSchemeBindParams( t_Handle h_FmPort, t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; uint32_t walking1Mask = 0x80000000, tmp; uint8_t idx = 0; p_SchemeBind->netEnvId = p_FmPort->netEnvId; p_SchemeBind->hardwarePortId = p_FmPort->hardwarePortId; p_SchemeBind->useClsPlan = p_FmPort->useClsPlan; p_SchemeBind->numOfSchemes = 0; tmp = p_FmPort->schemesPerPortVector; if (tmp) { while (tmp) { if (tmp & walking1Mask) { p_SchemeBind->schemesIds[p_SchemeBind->numOfSchemes] = idx; p_SchemeBind->numOfSchemes++; tmp &= ~walking1Mask; } walking1Mask >>= 1; idx++; } } return tmp; } static void FmPortCheckNApplyMacsec(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; volatile uint32_t *p_BmiCfgReg = NULL; uint32_t macsecEn = BMI_PORT_CFG_EN_MACSEC; uint32_t lcv, walking1Mask = 0x80000000; uint8_t cnt = 0; ASSERT_COND(p_FmPort); ASSERT_COND(p_FmPort->h_FmPcd); ASSERT_COND(!p_FmPort->p_FmPortDriverParam); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) return; p_BmiCfgReg = &p_FmPort->port.bmi_regs->rx.fmbm_rcfg; /* get LCV for MACSEC */ if ((lcv = FmPcdGetMacsecLcv(p_FmPort->h_FmPcd, p_FmPort->netEnvId)) != 0) { while (!(lcv & walking1Mask)) { cnt++; walking1Mask >>= 1; } macsecEn |= (uint32_t)cnt << BMI_PORT_CFG_MS_SEL_SHIFT; WRITE_UINT32(*p_BmiCfgReg, GET_UINT32(*p_BmiCfgReg) | macsecEn); } } static t_Error SetPcd(t_FmPort *p_FmPort, t_FmPortPcdParams *p_PcdParams) { t_Error err = E_OK; uint32_t tmpReg; volatile uint32_t *p_BmiNia = NULL; volatile uint32_t *p_BmiPrsNia = NULL; volatile uint32_t *p_BmiPrsStartOffset = NULL; volatile uint32_t *p_BmiInitPrsResult = NULL; volatile uint32_t *p_BmiCcBase = NULL; uint16_t hdrNum, L3HdrNum, greHdrNum; int i; bool isEmptyClsPlanGrp; uint32_t tmpHxs[FM_PCD_PRS_NUM_OF_HDRS]; uint16_t absoluteProfileId; uint8_t physicalSchemeId; uint32_t ccTreePhysOffset; t_FmPcdKgInterModuleBindPortToSchemes schemeBind; uint32_t initialSwPrs = 0; ASSERT_COND(p_FmPort); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if (p_FmPort->imEn) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independent mode ports only")); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); p_FmPort->netEnvId = FmPcdGetNetEnvId(p_PcdParams->h_NetEnv); p_FmPort->pcdEngines = 0; /* initialize p_FmPort->pcdEngines field in port's structure
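. Each e_FM_PORT_PCD_SUPPORT_* case below ORs in one bit per engine (FM_PCD_PRS, FM_PCD_KG, FM_PCD_CC, FM_PCD_PLCR), and the rest of SetPcd keys off this bitmask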
*/ switch (p_PcdParams->pcdSupport) { case (e_FM_PORT_PCD_SUPPORT_NONE): RETURN_ERROR( MAJOR, E_INVALID_STATE, ("No PCD configuration required if e_FM_PORT_PCD_SUPPORT_NONE selected")); case (e_FM_PORT_PCD_SUPPORT_PRS_ONLY): p_FmPort->pcdEngines |= FM_PCD_PRS; break; case (e_FM_PORT_PCD_SUPPORT_PLCR_ONLY): p_FmPort->pcdEngines |= FM_PCD_PLCR; break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR): p_FmPort->pcdEngines |= FM_PCD_PRS; p_FmPort->pcdEngines |= FM_PCD_PLCR; break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG): p_FmPort->pcdEngines |= FM_PCD_PRS; p_FmPort->pcdEngines |= FM_PCD_KG; break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC): p_FmPort->pcdEngines |= FM_PCD_PRS; p_FmPort->pcdEngines |= FM_PCD_CC; p_FmPort->pcdEngines |= FM_PCD_KG; break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR): p_FmPort->pcdEngines |= FM_PCD_PRS; p_FmPort->pcdEngines |= FM_PCD_KG; p_FmPort->pcdEngines |= FM_PCD_CC; p_FmPort->pcdEngines |= FM_PCD_PLCR; break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC): p_FmPort->pcdEngines |= FM_PCD_PRS; p_FmPort->pcdEngines |= FM_PCD_CC; break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR): p_FmPort->pcdEngines |= FM_PCD_PRS; p_FmPort->pcdEngines |= FM_PCD_CC; p_FmPort->pcdEngines |= FM_PCD_PLCR; break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR): p_FmPort->pcdEngines |= FM_PCD_PRS; p_FmPort->pcdEngines |= FM_PCD_KG; p_FmPort->pcdEngines |= FM_PCD_PLCR; break; case (e_FM_PORT_PCD_SUPPORT_CC_ONLY): p_FmPort->pcdEngines |= FM_PCD_CC; break; #ifdef FM_CAPWAP_SUPPORT case (e_FM_PORT_PCD_SUPPORT_CC_AND_KG): p_FmPort->pcdEngines |= FM_PCD_CC; p_FmPort->pcdEngines |= FM_PCD_KG; break; case (e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR): p_FmPort->pcdEngines |= FM_PCD_CC; p_FmPort->pcdEngines |= FM_PCD_KG; p_FmPort->pcdEngines |= FM_PCD_PLCR; break; #endif /* FM_CAPWAP_SUPPORT */ default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid pcdSupport")); } if ((p_FmPort->pcdEngines & FM_PCD_PRS) && (p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams > FM_PCD_PRS_NUM_OF_HDRS)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("Port parser numOfHdrsWithAdditionalParams may not exceed %d", FM_PCD_PRS_NUM_OF_HDRS)); /* check that parameters exist for each and only each defined engine */ if ((!!(p_FmPort->pcdEngines & FM_PCD_PRS) != !!p_PcdParams->p_PrsParams) || (!!(p_FmPort->pcdEngines & FM_PCD_KG) != !!p_PcdParams->p_KgParams) || (!!(p_FmPort->pcdEngines & FM_PCD_CC) != !!p_PcdParams->p_CcParams)) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("PCD initialization structure is not consistent with pcdSupport")); /* get PCD registers pointers */ switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne; p_BmiPrsNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne; p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso; p_BmiInitPrsResult = &p_FmPort->port.bmi_regs->rx.fmbm_rprai[0]; p_BmiCcBase = &p_FmPort->port.bmi_regs->rx.fmbm_rccb; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne; p_BmiPrsNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne; p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso; p_BmiInitPrsResult = &p_FmPort->port.bmi_regs->oh.fmbm_oprai[0]; p_BmiCcBase = &p_FmPort->port.bmi_regs->oh.fmbm_occb; break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); } /* set PCD port parameter */ if (p_FmPort->pcdEngines & FM_PCD_CC) { err = FmPcdCcBindTree(p_FmPort->h_FmPcd, p_PcdParams, p_PcdParams->p_CcParams->h_CcTree, &ccTreePhysOffset, 
p_FmPort); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); WRITE_UINT32(*p_BmiCcBase, ccTreePhysOffset); p_FmPort->ccTreeId = p_PcdParams->p_CcParams->h_CcTree; } if (p_FmPort->pcdEngines & FM_PCD_KG) { if (p_PcdParams->p_KgParams->numOfSchemes == 0) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For ports using Keygen, at least one scheme must be bound. ")); err = FmPcdKgSetOrBindToClsPlanGrp(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, p_FmPort->netEnvId, p_FmPort->optArray, &p_FmPort->clsPlanGrpId, &isEmptyClsPlanGrp); if (err) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FmPcdKgSetOrBindToClsPlanGrp failed. ")); p_FmPort->useClsPlan = !isEmptyClsPlanGrp; schemeBind.netEnvId = p_FmPort->netEnvId; schemeBind.hardwarePortId = p_FmPort->hardwarePortId; schemeBind.numOfSchemes = p_PcdParams->p_KgParams->numOfSchemes; schemeBind.useClsPlan = p_FmPort->useClsPlan; /* for each scheme */ for (i = 0; i < p_PcdParams->p_KgParams->numOfSchemes; i++) { ASSERT_COND(p_PcdParams->p_KgParams->h_Schemes[i]); physicalSchemeId = FmPcdKgGetSchemeId( p_PcdParams->p_KgParams->h_Schemes[i]); schemeBind.schemesIds[i] = physicalSchemeId; /* build vector */ p_FmPort->schemesPerPortVector |= 1 << (31 - (uint32_t)physicalSchemeId); #if (DPAA_VERSION >= 11) /*because of the state that VSPE is defined per port - all PCD path should be according to this requirement if !VSPE - in port, for relevant scheme VSPE can not be set*/ if (!p_FmPort->vspe && FmPcdKgGetVspe((p_PcdParams->p_KgParams->h_Schemes[i]))) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("VSPE is not at port level")); #endif /* (DPAA_VERSION >= 11) */ } err = FmPcdKgBindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } /***************************/ /* configure NIA after BMI */ /***************************/ /* rfne may contain FDCS bits, so first we read them. 
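Only the BMI_RFNE_FDCS_MASK portion is preserved in savedBmiNia; the engine and action bits are rebuilt from scratch by the PCD configuration below.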
*/ p_FmPort->savedBmiNia = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK; /* If policer is used directly after BMI or PRS */ if ((p_FmPort->pcdEngines & FM_PCD_PLCR) && ((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PLCR_ONLY) || (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR))) { if (!p_PcdParams->p_PlcrParams->h_Profile) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Profile should be initialized")); absoluteProfileId = (uint16_t)FmPcdPlcrProfileGetAbsoluteId( p_PcdParams->p_PlcrParams->h_Profile); if (!FmPcdPlcrIsProfileValid(p_FmPort->h_FmPcd, absoluteProfileId)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Private port profile not valid.")); tmpReg = (uint32_t)(absoluteProfileId | NIA_PLCR_ABSOLUTE); if (p_FmPort->pcdEngines & FM_PCD_PRS) /* e_FM_PCD_SUPPORT_PRS_AND_PLCR */ /* update BMI HPNIA */ WRITE_UINT32(*p_BmiPrsNia, (uint32_t)(NIA_ENG_PLCR | tmpReg)); else /* e_FM_PCD_SUPPORT_PLCR_ONLY */ /* update BMI NIA */ p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_PLCR); } /* if CC is used directly after BMI */ if ((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_ONLY) #ifdef FM_CAPWAP_SUPPORT || (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_AND_KG) || (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR) #endif /* FM_CAPWAP_SUPPORT */ ) { if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("e_FM_PORT_PCD_SUPPORT_CC_xx available for offline parsing ports only")); p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC); /* check that prs start offset == RIM[FOF] */ } if (p_FmPort->pcdEngines & FM_PCD_PRS) { ASSERT_COND(p_PcdParams->p_PrsParams); #if (DPAA_VERSION >= 11) if (p_PcdParams->p_PrsParams->firstPrsHdr == HEADER_TYPE_CAPWAP) hdrNum = OFFLOAD_SW_PATCH_CAPWAP_LABEL; else { #endif /* (DPAA_VERSION >= 11) */ /* if PRS is used it is always first */ hdrNum = GetPrsHdrNum(p_PcdParams->p_PrsParams->firstPrsHdr); if (hdrNum == ILLEGAL_HDR_NUM) RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unsupported header.")); #if (DPAA_VERSION >= 11) } #endif /* (DPAA_VERSION >= 11) */ p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_PRS | (uint32_t)(hdrNum)); /* set after parser NIA */ tmpReg = 0; switch (p_PcdParams->pcdSupport) { case (e_FM_PORT_PCD_SUPPORT_PRS_ONLY): WRITE_UINT32(*p_BmiPrsNia, GET_NIA_BMI_AC_ENQ_FRAME(p_FmPort->h_FmPcd)); break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC): case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR): tmpReg = NIA_KG_CC_EN; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG): case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR): if (p_PcdParams->p_KgParams->directScheme) { physicalSchemeId = FmPcdKgGetSchemeId( p_PcdParams->p_KgParams->h_DirectScheme); /* check that this scheme was bound to this port */ for (i = 0; i < p_PcdParams->p_KgParams->numOfSchemes; i++) if (p_PcdParams->p_KgParams->h_DirectScheme == p_PcdParams->p_KgParams->h_Schemes[i]) break; if (i == p_PcdParams->p_KgParams->numOfSchemes) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("Direct scheme is not one of the port selected schemes.")); tmpReg |= (uint32_t)(NIA_KG_DIRECT | physicalSchemeId); } WRITE_UINT32(*p_BmiPrsNia, NIA_ENG_KG | tmpReg); break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC): case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR): WRITE_UINT32(*p_BmiPrsNia, (uint32_t)(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC)); break; case (e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR): break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid PCD support")); } /* set start parsing offset */ WRITE_UINT32(*p_BmiPrsStartOffset, 
p_PcdParams->p_PrsParams->parsingOffset); /************************************/ /* Parser port parameters */ /************************************/ /* stop before configuring */ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, PRS_CAC_STOP); /* wait for parser to be in idle state */ while (GET_UINT32(p_FmPort->p_FmPortPrsRegs->pcac) & PRS_CAC_ACTIVE) ; /* set soft seq attachment register */ memset(tmpHxs, 0, FM_PCD_PRS_NUM_OF_HDRS * sizeof(uint32_t)); /* set protocol options */ for (i = 0; p_FmPort->optArray[i]; i++) switch (p_FmPort->optArray[i]) { case (ETH_BROADCAST): hdrNum = GetPrsHdrNum(HEADER_TYPE_ETH); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_ETH_BC_SHIFT; break; case (ETH_MULTICAST): hdrNum = GetPrsHdrNum(HEADER_TYPE_ETH); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_ETH_MC_SHIFT; break; case (VLAN_STACKED): hdrNum = GetPrsHdrNum(HEADER_TYPE_VLAN); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_VLAN_STACKED_SHIFT; break; case (MPLS_STACKED): hdrNum = GetPrsHdrNum(HEADER_TYPE_MPLS); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_MPLS_STACKED_SHIFT; break; case (IPV4_BROADCAST_1): hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_1_BC_SHIFT; break; case (IPV4_MULTICAST_1): hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_1_MC_SHIFT; break; case (IPV4_UNICAST_2): hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_2_UC_SHIFT; break; case (IPV4_MULTICAST_BROADCAST_2): hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_2_MC_BC_SHIFT; break; case (IPV6_MULTICAST_1): hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_1_MC_SHIFT; break; case (IPV6_UNICAST_2): hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_2_UC_SHIFT; break; case (IPV6_MULTICAST_2): hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6); tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_2_MC_SHIFT; break; } if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP)) { if (p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams == FM_PCD_PRS_NUM_OF_HDRS) RETURN_ERROR( MINOR, E_INVALID_VALUE, ("If HEADER_TYPE_UDP_ENCAP_ESP is used, numOfHdrsWithAdditionalParams may be up to FM_PCD_PRS_NUM_OF_HDRS - 1")); p_PcdParams->p_PrsParams->additionalParams[p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams].hdr = HEADER_TYPE_UDP; p_PcdParams->p_PrsParams->additionalParams[p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams].swPrsEnable = TRUE; p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams++; } /* set MPLS default next header - HW reset workaround */ hdrNum = GetPrsHdrNum(HEADER_TYPE_MPLS); tmpHxs[hdrNum] |= PRS_HDR_MPLS_LBL_INTER_EN; L3HdrNum = GetPrsHdrNum(HEADER_TYPE_USER_DEFINED_L3); tmpHxs[hdrNum] |= (uint32_t)L3HdrNum << PRS_HDR_MPLS_NEXT_HDR_SHIFT; /* for GRE, disable errors */ greHdrNum = GetPrsHdrNum(HEADER_TYPE_GRE); tmpHxs[greHdrNum] |= PRS_HDR_ERROR_DIS; /* For UDP remove PAD from L4 checksum calculation */ hdrNum = GetPrsHdrNum(HEADER_TYPE_UDP); tmpHxs[hdrNum] |= PRS_HDR_UDP_PAD_REMOVAL; /* For TCP remove PAD from L4 checksum calculation */ hdrNum = GetPrsHdrNum(HEADER_TYPE_TCP); tmpHxs[hdrNum] |= PRS_HDR_TCP_PAD_REMOVAL; /* config additional params for specific headers */ for (i = 0; i < p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams; i++) { /* case for using sw parser as the initial NIA address, before * HW parsing */ if ((p_PcdParams->p_PrsParams->additionalParams[i].hdr == HEADER_TYPE_NONE) && 
p_PcdParams->p_PrsParams->additionalParams[i].swPrsEnable) { initialSwPrs = FmPcdGetSwPrsOffset(p_FmPort->h_FmPcd, HEADER_TYPE_NONE, p_PcdParams->p_PrsParams->additionalParams[i].indexPerHdr); if (initialSwPrs == ILLEGAL_BASE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); /* clear parser first HXS */ p_FmPort->savedBmiNia &= ~BMI_RFNE_HXS_MASK; /* 0x000000FF */ /* rewrite with soft parser start */ p_FmPort->savedBmiNia |= initialSwPrs; continue; } hdrNum = GetPrsHdrNum(p_PcdParams->p_PrsParams->additionalParams[i].hdr); if (hdrNum == ILLEGAL_HDR_NUM) RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); if (hdrNum == NO_HDR_NUM) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("Private headers may not use additional parameters")); err = AdditionalPrsParams( p_FmPort, &p_PcdParams->p_PrsParams->additionalParams[i], &tmpHxs[hdrNum]); if (err) RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); } /* Check if ip-reassembly port - need to link sw-parser code */ if (p_FmPort->h_IpReassemblyManip) { /* link to sw parser code for IP Frag - only if no other code is applied. */ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4); if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN)) tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv4_IPR_LABEL); hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6); if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN)) tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPR_LABEL); } else { if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId, HEADER_TYPE_UDP_LITE)) { hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6); if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN)) tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL); } else if ((FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd) && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))) { hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6); if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN)) tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL); } } #if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId, HEADER_TYPE_UDP_LITE)) { /* link to sw parser code for udp lite - only if no other code is applied. */ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6); if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN)) tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | UDP_LITE_SW_PATCH_LABEL); } #endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */ for (i = 0; i < FM_PCD_PRS_NUM_OF_HDRS; i++) { /* For all header set LCV as taken from netEnv*/ WRITE_UINT32( p_FmPort->p_FmPortPrsRegs->hdrs[i].lcv, FmPcdGetLcv(p_FmPort->h_FmPcd, p_FmPort->netEnvId, (uint8_t)i)); /* set HXS register according to default+Additional params+protocol options */ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->hdrs[i].softSeqAttach, tmpHxs[i]); } /* set tpid. 
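The pctpid register holds both TPIDs, tpid1 in the upper field (PRS_PCTPID_SHIFT) and tpid2 in the lower; each override below masks off only its own field before OR-ing in the new value, so the two can be set independently.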
*/ tmpReg = PRS_TPID_DFLT; if (p_PcdParams->p_PrsParams->setVlanTpid1) { tmpReg &= PRS_TPID2_MASK; tmpReg |= (uint32_t)p_PcdParams->p_PrsParams->vlanTpid1 << PRS_PCTPID_SHIFT; } if (p_PcdParams->p_PrsParams->setVlanTpid2) { tmpReg &= PRS_TPID1_MASK; tmpReg |= (uint32_t)p_PcdParams->p_PrsParams->vlanTpid2; } WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pctpid, tmpReg); /* enable parser */ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, 0); if (p_PcdParams->p_PrsParams->prsResultPrivateInfo) p_FmPort->privateInfo = p_PcdParams->p_PrsParams->prsResultPrivateInfo; } /* end parser */ else { if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd) && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) { hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6); WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->hdrs[hdrNum].softSeqAttach, (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL)); } WRITE_UINT32(*p_BmiPrsStartOffset, 0); p_FmPort->privateInfo = 0; } FmPortCheckNApplyMacsec(p_FmPort); WRITE_UINT32( *p_BmiPrsStartOffset, GET_UINT32(*p_BmiPrsStartOffset) + p_FmPort->internalBufferOffset); /* set initial parser result - used for all engines */ for (i = 0; i < FM_PORT_PRS_RESULT_NUM_OF_WORDS; i++) { if (!i) WRITE_UINT32( *(p_BmiInitPrsResult), (uint32_t)(((uint32_t)p_FmPort->privateInfo << BMI_PR_PORTID_SHIFT) | BMI_PRS_RESULT_HIGH)); else { if (i < FM_PORT_PRS_RESULT_NUM_OF_WORDS / 2) WRITE_UINT32(*(p_BmiInitPrsResult+i), BMI_PRS_RESULT_HIGH); else WRITE_UINT32(*(p_BmiInitPrsResult+i), BMI_PRS_RESULT_LOW); } } return E_OK; } static t_Error DeletePcd(t_FmPort *p_FmPort) { t_Error err = E_OK; volatile uint32_t *p_BmiNia = NULL; volatile uint32_t *p_BmiPrsStartOffset = NULL; ASSERT_COND(p_FmPort); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if (p_FmPort->imEn) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independent mode ports only")); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); if (!p_FmPort->pcdEngines) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("called for non PCD port")); /* get PCD registers pointers */ switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne; p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne; p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso; break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); } if ((GET_UINT32(*p_BmiNia) & GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME()) != GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME()) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("port has to be detached previously")); WRITE_UINT32(*p_BmiPrsStartOffset, 0); /* "cut" PCD out of the port's flow - go to BMI */ /* WRITE_UINT32(*p_BmiNia, (p_FmPort->savedBmiNia & BMI_RFNE_FDCS_MASK) | (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)); */ if (p_FmPort->pcdEngines & FM_PCD_PRS) { /* stop parser */ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, PRS_CAC_STOP); /* wait for parser to be in idle state */ while (GET_UINT32(p_FmPort->p_FmPortPrsRegs->pcac) & PRS_CAC_ACTIVE) ; } if (p_FmPort->pcdEngines & FM_PCD_KG) { t_FmPcdKgInterModuleBindPortToSchemes schemeBind; /* unbind all schemes */ p_FmPort->schemesPerPortVector = GetPortSchemeBindParams(p_FmPort, &schemeBind); err =
FmPcdKgUnbindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); err = FmPcdKgDeleteOrUnbindPortToClsPlanGrp(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, p_FmPort->clsPlanGrpId); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); p_FmPort->useClsPlan = FALSE; } if (p_FmPort->pcdEngines & FM_PCD_CC) { /* unbind - we need to get the treeId too */ err = FmPcdCcUnbindTree(p_FmPort->h_FmPcd, p_FmPort->ccTreeId); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } p_FmPort->pcdEngines = 0; return E_OK; } static t_Error AttachPCD(t_FmPort *p_FmPort) { volatile uint32_t *p_BmiNia = NULL; ASSERT_COND(p_FmPort); /* get PCD registers pointers */ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne; else p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne; /* check that current NIA is BMI to BMI */ if ((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK) != GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME()) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("may be called only for ports in BMI-to-BMI state.")); if (p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY) if (FmSetNumOfRiscsPerPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId, 1, p_FmPort->orFmanCtrl) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); if (p_FmPort->requiredAction & UPDATE_NIA_CMNE) { if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ocmne, p_FmPort->savedBmiCmne); else WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcmne, p_FmPort->savedBmiCmne); } if (p_FmPort->requiredAction & UPDATE_NIA_PNEN) WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen, p_FmPort->savedQmiPnen); if (p_FmPort->requiredAction & UPDATE_NIA_FENE) { if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofene, p_FmPort->savedBmiFene); else WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfene, p_FmPort->savedBmiFene); } if (p_FmPort->requiredAction & UPDATE_NIA_FPNE) { if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofpne, p_FmPort->savedBmiFpne); else WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfpne, p_FmPort->savedBmiFpne); } if (p_FmPort->requiredAction & UPDATE_OFP_DPTE) { ASSERT_COND(p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING); WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofp, p_FmPort->savedBmiOfp); } WRITE_UINT32(*p_BmiNia, p_FmPort->savedBmiNia); if (p_FmPort->requiredAction & UPDATE_NIA_PNDN) { p_FmPort->origNonRxQmiRegsPndn = GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn); WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn, p_FmPort->savedNonRxQmiRegsPndn); } return E_OK; } static t_Error DetachPCD(t_FmPort *p_FmPort) { volatile uint32_t *p_BmiNia = NULL; ASSERT_COND(p_FmPort); /* get PCD registers pointers */ if (p_FmPort->requiredAction & UPDATE_NIA_PNDN) WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn, p_FmPort->origNonRxQmiRegsPndn); if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne; else p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne; WRITE_UINT32( *p_BmiNia, (p_FmPort->savedBmiNia & BMI_RFNE_FDCS_MASK) | GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME()); if (FmPcdGetHcHandle(p_FmPort->h_FmPcd)) FmPcdHcSync(p_FmPort->h_FmPcd); if (p_FmPort->requiredAction & UPDATE_NIA_FENE) { if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofene, NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR); else 
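/* Rx ports restore the same default enqueue NIA as the O/H branch above */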
WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfene, NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR); } if (p_FmPort->requiredAction & UPDATE_NIA_PNEN) WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pnen, NIA_ENG_BMI | NIA_BMI_AC_RELEASE); if (p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY) if (FmSetNumOfRiscsPerPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId, 2, p_FmPort->orFmanCtrl) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); p_FmPort->requiredAction = 0; return E_OK; } /*****************************************************************************/ /* Inter-module API routines */ /*****************************************************************************/ void FmPortSetMacsecCmd(t_Handle h_FmPort, uint8_t dfltSci) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; volatile uint32_t *p_BmiCfgReg = NULL; uint32_t tmpReg; SANITY_CHECK_RETURN(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN(p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_TX)) { REPORT_ERROR(MAJOR, E_INVALID_OPERATION, ("The routine is relevant for Tx ports only")); return; } p_BmiCfgReg = &p_FmPort->port.bmi_regs->tx.fmbm_tfca; tmpReg = GET_UINT32(*p_BmiCfgReg) & ~BMI_CMD_ATTR_MACCMD_MASK; tmpReg |= BMI_CMD_ATTR_MACCMD_SECURED; tmpReg |= (((uint32_t)dfltSci << BMI_CMD_ATTR_MACCMD_SC_SHIFT) & BMI_CMD_ATTR_MACCMD_SC_MASK); WRITE_UINT32(*p_BmiCfgReg, tmpReg); } uint8_t FmPortGetNetEnvId(t_Handle h_FmPort) { return ((t_FmPort*)h_FmPort)->netEnvId; } uint8_t FmPortGetHardwarePortId(t_Handle h_FmPort) { return ((t_FmPort*)h_FmPort)->hardwarePortId; } uint32_t FmPortGetPcdEngines(t_Handle h_FmPort) { return ((t_FmPort*)h_FmPort)->pcdEngines; } #if (DPAA_VERSION >= 11) t_Error FmPortSetGprFunc(t_Handle h_FmPort, e_FmPortGprFuncType gprFunc, void **p_Value) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; uint32_t muramPageOffset; ASSERT_COND(p_FmPort); ASSERT_COND(p_Value); if (p_FmPort->gprFunc != e_FM_PORT_GPR_EMPTY) { if (p_FmPort->gprFunc != gprFunc) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("gpr was assigned with different func")); } else { switch (gprFunc) { case (e_FM_PORT_GPR_MURAM_PAGE): p_FmPort->p_ParamsPage = FM_MURAM_AllocMem(p_FmPort->h_FmMuram, 256, 8); if (!p_FmPort->p_ParamsPage) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for page")); IOMemSet32(p_FmPort->p_ParamsPage, 0, 256); muramPageOffset = (uint32_t)(XX_VirtToPhys(p_FmPort->p_ParamsPage) - p_FmPort->fmMuramPhysBaseAddr); switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): WRITE_UINT32( p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr, muramPageOffset); break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): WRITE_UINT32( p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ogpr, muramPageOffset); break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); } break; default: RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); } p_FmPort->gprFunc = gprFunc; } switch (p_FmPort->gprFunc) { case (e_FM_PORT_GPR_MURAM_PAGE): *p_Value = p_FmPort->p_ParamsPage; break; default: RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); } return E_OK; } #endif /* (DPAA_VERSION >= 11) */ t_Error FmPortGetSetCcParams(t_Handle h_FmPort, t_FmPortGetSetCcParams *p_CcParams) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; - int tmpInt; + uint32_t tmpInt; volatile uint32_t *p_BmiPrsStartOffset = NULL; /* this function called from Cc for pass and receive parameters port params between CC and PORT*/ if ((p_CcParams->getCcParams.type & OFFSET_OF_PR) && 
(p_FmPort->bufferOffsets.prsResultOffset != ILLEGAL_BASE)) { p_CcParams->getCcParams.prOffset = (uint8_t)p_FmPort->bufferOffsets.prsResultOffset; p_CcParams->getCcParams.type &= ~OFFSET_OF_PR; } if (p_CcParams->getCcParams.type & HW_PORT_ID) { p_CcParams->getCcParams.hardwarePortId = (uint8_t)p_FmPort->hardwarePortId; p_CcParams->getCcParams.type &= ~HW_PORT_ID; } if ((p_CcParams->getCcParams.type & OFFSET_OF_DATA) && (p_FmPort->bufferOffsets.dataOffset != ILLEGAL_BASE)) { p_CcParams->getCcParams.dataOffset = (uint16_t)p_FmPort->bufferOffsets.dataOffset; p_CcParams->getCcParams.type &= ~OFFSET_OF_DATA; } if (p_CcParams->getCcParams.type & NUM_OF_TASKS) { p_CcParams->getCcParams.numOfTasks = (uint8_t)p_FmPort->tasks.num; p_CcParams->getCcParams.type &= ~NUM_OF_TASKS; } if (p_CcParams->getCcParams.type & NUM_OF_EXTRA_TASKS) { p_CcParams->getCcParams.numOfExtraTasks = (uint8_t)p_FmPort->tasks.extra; p_CcParams->getCcParams.type &= ~NUM_OF_EXTRA_TASKS; } if (p_CcParams->getCcParams.type & FM_REV) { p_CcParams->getCcParams.revInfo.majorRev = p_FmPort->fmRevInfo.majorRev; p_CcParams->getCcParams.revInfo.minorRev = p_FmPort->fmRevInfo.minorRev; p_CcParams->getCcParams.type &= ~FM_REV; } if (p_CcParams->getCcParams.type & DISCARD_MASK) { if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) p_CcParams->getCcParams.discardMask = GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm); else p_CcParams->getCcParams.discardMask = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm); p_CcParams->getCcParams.type &= ~DISCARD_MASK; } if (p_CcParams->getCcParams.type & MANIP_EXTRA_SPACE) { p_CcParams->getCcParams.internalBufferOffset = p_FmPort->internalBufferOffset; p_CcParams->getCcParams.type &= ~MANIP_EXTRA_SPACE; } if (p_CcParams->getCcParams.type & GET_NIA_FPNE) { if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) p_CcParams->getCcParams.nia = GET_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofpne); else p_CcParams->getCcParams.nia = GET_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfpne); p_CcParams->getCcParams.type &= ~GET_NIA_FPNE; } if (p_CcParams->getCcParams.type & GET_NIA_PNDN) { if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); p_CcParams->getCcParams.nia = GET_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn); p_CcParams->getCcParams.type &= ~GET_NIA_PNDN; } if ((p_CcParams->setCcParams.type & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY) && !(p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY)) { p_FmPort->requiredAction |= UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY; p_FmPort->orFmanCtrl = p_CcParams->setCcParams.orFmanCtrl; } if ((p_CcParams->setCcParams.type & UPDATE_NIA_PNEN) && !(p_FmPort->requiredAction & UPDATE_NIA_PNEN)) { p_FmPort->savedQmiPnen = p_CcParams->setCcParams.nia; p_FmPort->requiredAction |= UPDATE_NIA_PNEN; } else if (p_CcParams->setCcParams.type & UPDATE_NIA_PNEN) { if (p_FmPort->savedQmiPnen != p_CcParams->setCcParams.nia) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("PNEN was defined previously different")); } if ((p_CcParams->setCcParams.type & UPDATE_NIA_PNDN) && !(p_FmPort->requiredAction & UPDATE_NIA_PNDN)) { p_FmPort->savedNonRxQmiRegsPndn = p_CcParams->setCcParams.nia; p_FmPort->requiredAction |= UPDATE_NIA_PNDN; } else if (p_CcParams->setCcParams.type & UPDATE_NIA_PNDN) { if (p_FmPort->savedNonRxQmiRegsPndn != p_CcParams->setCcParams.nia) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("PNDN was defined previously different")); } if ((p_CcParams->setCcParams.type & 
UPDATE_NIA_FENE) && (p_CcParams->setCcParams.overwrite || !(p_FmPort->requiredAction & UPDATE_NIA_FENE))) { p_FmPort->savedBmiFene = p_CcParams->setCcParams.nia; p_FmPort->requiredAction |= UPDATE_NIA_FENE; } else if (p_CcParams->setCcParams.type & UPDATE_NIA_FENE) { if (p_FmPort->savedBmiFene != p_CcParams->setCcParams.nia) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("xFENE was defined previously different")); } if ((p_CcParams->setCcParams.type & UPDATE_NIA_FPNE) && !(p_FmPort->requiredAction & UPDATE_NIA_FPNE)) { p_FmPort->savedBmiFpne = p_CcParams->setCcParams.nia; p_FmPort->requiredAction |= UPDATE_NIA_FPNE; } else if (p_CcParams->setCcParams.type & UPDATE_NIA_FPNE) { if (p_FmPort->savedBmiFpne != p_CcParams->setCcParams.nia) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("xFPNE was defined previously different")); } if ((p_CcParams->setCcParams.type & UPDATE_NIA_CMNE) && !(p_FmPort->requiredAction & UPDATE_NIA_CMNE)) { p_FmPort->savedBmiCmne = p_CcParams->setCcParams.nia; p_FmPort->requiredAction |= UPDATE_NIA_CMNE; } else if (p_CcParams->setCcParams.type & UPDATE_NIA_CMNE) { if (p_FmPort->savedBmiCmne != p_CcParams->setCcParams.nia) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("xCMNE was defined previously different")); } if ((p_CcParams->setCcParams.type & UPDATE_PSO) && !(p_FmPort->requiredAction & UPDATE_PSO)) { /* get PCD registers pointers */ switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso; break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); } /* set start parsing offset */ tmpInt = (int)GET_UINT32(*p_BmiPrsStartOffset) + p_CcParams->setCcParams.psoSize; if (tmpInt > 0) WRITE_UINT32(*p_BmiPrsStartOffset, (uint32_t)tmpInt); p_FmPort->requiredAction |= UPDATE_PSO; p_FmPort->savedPrsStartOffset = p_CcParams->setCcParams.psoSize; } else if (p_CcParams->setCcParams.type & UPDATE_PSO) { if (p_FmPort->savedPrsStartOffset != p_CcParams->setCcParams.psoSize) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("parser start offset was defined previously different")); } if ((p_CcParams->setCcParams.type & UPDATE_OFP_DPTE) && !(p_FmPort->requiredAction & UPDATE_OFP_DPTE)) { if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); p_FmPort->savedBmiOfp = GET_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofp); p_FmPort->savedBmiOfp &= ~BMI_FIFO_PIPELINE_DEPTH_MASK; p_FmPort->savedBmiOfp |= p_CcParams->setCcParams.ofpDpde << BMI_FIFO_PIPELINE_DEPTH_SHIFT; p_FmPort->requiredAction |= UPDATE_OFP_DPTE; } return E_OK; } /*********************** End of inter-module routines ************************/ /****************************************/ /* API Init unit functions */ /****************************************/ t_Handle FM_PORT_Config(t_FmPortParams *p_FmPortParams) { t_FmPort *p_FmPort; uintptr_t baseAddr = p_FmPortParams->baseAddr; uint32_t tmpReg; /* Allocate FM structure */ p_FmPort = (t_FmPort *)XX_Malloc(sizeof(t_FmPort)); if (!p_FmPort) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Port driver structure")); return NULL; } memset(p_FmPort, 0, sizeof(t_FmPort)); /* Allocate the FM driver's parameters structure */ p_FmPort->p_FmPortDriverParam = (t_FmPortDriverParam *)XX_Malloc( sizeof(t_FmPortDriverParam)); if (!p_FmPort->p_FmPortDriverParam) { XX_Free(p_FmPort); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Port driver parameters")); return
NULL; } memset(p_FmPort->p_FmPortDriverParam, 0, sizeof(t_FmPortDriverParam)); /* Initialize FM port parameters which will be kept by the driver */ p_FmPort->portType = p_FmPortParams->portType; p_FmPort->portId = p_FmPortParams->portId; p_FmPort->pcdEngines = FM_PCD_NONE; p_FmPort->f_Exception = p_FmPortParams->f_Exception; p_FmPort->h_App = p_FmPortParams->h_App; p_FmPort->h_Fm = p_FmPortParams->h_Fm; /* get FM revision */ FM_GetRevision(p_FmPort->h_Fm, &p_FmPort->fmRevInfo); /* calculate global portId number */ p_FmPort->hardwarePortId = SwPortIdToHwPortId(p_FmPort->portType, p_FmPortParams->portId, p_FmPort->fmRevInfo.majorRev, p_FmPort->fmRevInfo.minorRev); if (p_FmPort->fmRevInfo.majorRev >= 6) { if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) && (p_FmPortParams->portId != FM_OH_PORT_ID)) DBG(WARNING, ("Port ID %d is recommended for HC port. Overwriting HW defaults to be suitable for HC.", FM_OH_PORT_ID)); if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) && (p_FmPortParams->portId == FM_OH_PORT_ID)) DBG(WARNING, ("Use non-zero portId for OP port due to insufficient resources on portId 0.")); } /* Set up FM port parameters for initialization phase only */ /* First, fill in flibs struct */ fman_port_defconfig(&p_FmPort->p_FmPortDriverParam->dfltCfg, (enum fman_port_type)p_FmPort->portType); /* Overwrite some integration specific parameters */ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_pri_elevation = DEFAULT_PORT_rxFifoPriElevationLevel; p_FmPort->p_FmPortDriverParam->dfltCfg.rx_fifo_thr = DEFAULT_PORT_rxFifoThreshold; #if defined(FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675) || defined(FM_ERROR_VSP_NO_MATCH_SW006) p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006675 = TRUE; #else p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006675 = FALSE; #endif if ((p_FmPort->fmRevInfo.majorRev == 6) && (p_FmPort->fmRevInfo.minorRev == 0)) p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006320 = TRUE; else p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006320 = FALSE; /* Excessive Threshold register - exists for pre-FMv3 chips only */ if (p_FmPort->fmRevInfo.majorRev < 6) { #ifdef FM_NO_RESTRICT_ON_ACCESS_RSRC p_FmPort->p_FmPortDriverParam->dfltCfg.excessive_threshold_register = TRUE; #endif p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_rebm_has_sgd = FALSE; p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_tfne_has_features = FALSE; } else { p_FmPort->p_FmPortDriverParam->dfltCfg.excessive_threshold_register = FALSE; p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_rebm_has_sgd = TRUE; p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_tfne_has_features = TRUE; } if (p_FmPort->fmRevInfo.majorRev == 4) p_FmPort->p_FmPortDriverParam->dfltCfg.qmi_deq_options_support = FALSE; else p_FmPort->p_FmPortDriverParam->dfltCfg.qmi_deq_options_support = TRUE; /* Continue with other parameters */ p_FmPort->p_FmPortDriverParam->baseAddr = baseAddr; /* set memory map pointers */ p_FmPort->p_FmPortQmiRegs = (t_FmPortQmiRegs *)UINT_TO_PTR(baseAddr + QMI_PORT_REGS_OFFSET); p_FmPort->p_FmPortBmiRegs = (u_FmPortBmiRegs *)UINT_TO_PTR(baseAddr + BMI_PORT_REGS_OFFSET); p_FmPort->p_FmPortPrsRegs = (t_FmPortPrsRegs *)UINT_TO_PTR(baseAddr + PRS_PORT_REGS_OFFSET); p_FmPort->p_FmPortDriverParam->bufferPrefixContent.privDataSize = DEFAULT_PORT_bufferPrefixContent_privDataSize; p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult = DEFAULT_PORT_bufferPrefixContent_passPrsResult; p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp = DEFAULT_PORT_bufferPrefixContent_passTimeStamp; 
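/*
 * Note on the defaults above: bufferPrefixContent selects which fields the
 * FMan writes into the internal-context area that precedes the frame data in
 * each external buffer.  Illustrative retrieval (a sketch, assuming h_FmPort
 * is this port's handle and p_Data points to a received frame buffer), using
 * the FM_PORT_GetBuffer*() accessors defined later in this file:
 *
 *     t_FmPrsResult *p_Prs = FM_PORT_GetBufferPrsResult(h_FmPort, p_Data);
 *     ... p_Prs is non-NULL only if passPrsResult was set ...
 */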
p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passAllOtherPCDInfo = DEFAULT_PORT_bufferPrefixContent_passTimeStamp; p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign = DEFAULT_PORT_bufferPrefixContent_dataAlign; /* p_FmPort->p_FmPortDriverParam->dmaSwapData = (e_FmDmaSwapOption)DEFAULT_PORT_dmaSwapData; p_FmPort->p_FmPortDriverParam->dmaIntContextCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaIntContextCacheAttr; p_FmPort->p_FmPortDriverParam->dmaHeaderCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaHeaderCacheAttr; p_FmPort->p_FmPortDriverParam->dmaScatterGatherCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaScatterGatherCacheAttr; p_FmPort->p_FmPortDriverParam->dmaWriteOptimize = DEFAULT_PORT_dmaWriteOptimize; */ p_FmPort->p_FmPortDriverParam->liodnBase = p_FmPortParams->liodnBase; p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore = DEFAULT_PORT_cheksumLastBytesIgnore; p_FmPort->maxFrameLength = DEFAULT_PORT_maxFrameLength; /* resource distribution. */ p_FmPort->fifoBufs.num = DEFAULT_PORT_numOfFifoBufs(p_FmPort->portType) * BMI_FIFO_UNITS; p_FmPort->fifoBufs.extra = DEFAULT_PORT_extraNumOfFifoBufs * BMI_FIFO_UNITS; p_FmPort->openDmas.num = DEFAULT_PORT_numOfOpenDmas(p_FmPort->portType); p_FmPort->openDmas.extra = DEFAULT_PORT_extraNumOfOpenDmas(p_FmPort->portType); p_FmPort->tasks.num = DEFAULT_PORT_numOfTasks(p_FmPort->portType); p_FmPort->tasks.extra = DEFAULT_PORT_extraNumOfTasks(p_FmPort->portType); #ifdef FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 if ((p_FmPort->fmRevInfo.majorRev == 6) && (p_FmPort->fmRevInfo.minorRev == 0) && ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || (p_FmPort->portType == e_FM_PORT_TYPE_TX))) { p_FmPort->openDmas.num = 16; p_FmPort->openDmas.extra = 0; } #endif /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 */ /* Port type specific initialization: */ switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX): case (e_FM_PORT_TYPE_RX_10G): /* Initialize FM port parameters for initialization phase only */ p_FmPort->p_FmPortDriverParam->cutBytesFromEnd = DEFAULT_PORT_cutBytesFromEnd; p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = FALSE; p_FmPort->p_FmPortDriverParam->frmDiscardOverride = DEFAULT_PORT_frmDiscardOverride; tmpReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfp); p_FmPort->p_FmPortDriverParam->rxFifoPriElevationLevel = (((tmpReg & BMI_RX_FIFO_PRI_ELEVATION_MASK) >> BMI_RX_FIFO_PRI_ELEVATION_SHIFT) + 1) * BMI_FIFO_UNITS; p_FmPort->p_FmPortDriverParam->rxFifoThreshold = (((tmpReg & BMI_RX_FIFO_THRESHOLD_MASK) >> BMI_RX_FIFO_THRESHOLD_SHIFT) + 1) * BMI_FIFO_UNITS; p_FmPort->p_FmPortDriverParam->bufMargins.endMargins = DEFAULT_PORT_BufMargins_endMargins; p_FmPort->p_FmPortDriverParam->errorsToDiscard = DEFAULT_PORT_errorsToDiscard; p_FmPort->p_FmPortDriverParam->forwardReuseIntContext = DEFAULT_PORT_forwardIntContextReuse; #if (DPAA_VERSION >= 11) p_FmPort->p_FmPortDriverParam->noScatherGather = DEFAULT_PORT_noScatherGather; #endif /* (DPAA_VERSION >= 11) */ break; case (e_FM_PORT_TYPE_TX): p_FmPort->p_FmPortDriverParam->dontReleaseBuf = FALSE; #ifdef FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 tmpReg = 0x00001013; WRITE_UINT32( p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfp, tmpReg); #endif /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 */ case (e_FM_PORT_TYPE_TX_10G): tmpReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfp); p_FmPort->p_FmPortDriverParam->txFifoMinFillLevel = ((tmpReg & BMI_TX_FIFO_MIN_FILL_MASK) >> BMI_TX_FIFO_MIN_FILL_SHIFT) * BMI_FIFO_UNITS; 
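/*
 * The Tx FIFO levels here are decoded from the fmbm_tfp register: extract a
 * field with its mask/shift, then scale to bytes with BMI_FIFO_UNITS.  Some
 * fields are stored biased by one (hence the "+ 1" in the decodes below);
 * the min-fill level above is not.  Illustrative arithmetic only, assuming
 * BMI_FIFO_UNITS == 256: a raw low-comfort field of 3 would decode to
 * (3 + 1) * 256 = 1024 bytes.
 */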
p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth = (uint8_t)(((tmpReg & BMI_FIFO_PIPELINE_DEPTH_MASK) >> BMI_FIFO_PIPELINE_DEPTH_SHIFT) + 1); p_FmPort->p_FmPortDriverParam->txFifoLowComfLevel = (((tmpReg & BMI_TX_LOW_COMF_MASK) >> BMI_TX_LOW_COMF_SHIFT) + 1) * BMI_FIFO_UNITS; p_FmPort->p_FmPortDriverParam->deqType = DEFAULT_PORT_deqType; p_FmPort->p_FmPortDriverParam->deqPrefetchOption = DEFAULT_PORT_deqPrefetchOption; p_FmPort->p_FmPortDriverParam->deqHighPriority = (bool)((p_FmPort->portType == e_FM_PORT_TYPE_TX) ? DEFAULT_PORT_deqHighPriority_1G : DEFAULT_PORT_deqHighPriority_10G); p_FmPort->p_FmPortDriverParam->deqByteCnt = (uint16_t)( (p_FmPort->portType == e_FM_PORT_TYPE_TX) ? DEFAULT_PORT_deqByteCnt_1G : DEFAULT_PORT_deqByteCnt_10G); break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_FmPort->p_FmPortDriverParam->errorsToDiscard = DEFAULT_PORT_errorsToDiscard; #if (DPAA_VERSION >= 11) p_FmPort->p_FmPortDriverParam->noScatherGather = DEFAULT_PORT_noScatherGather; #endif /* (DPAA_VERSION >= 11) */ case (e_FM_PORT_TYPE_OH_HOST_COMMAND): p_FmPort->p_FmPortDriverParam->deqPrefetchOption = DEFAULT_PORT_deqPrefetchOption_HC; p_FmPort->p_FmPortDriverParam->deqHighPriority = DEFAULT_PORT_deqHighPriority_1G; p_FmPort->p_FmPortDriverParam->deqType = DEFAULT_PORT_deqType; p_FmPort->p_FmPortDriverParam->deqByteCnt = DEFAULT_PORT_deqByteCnt_1G; tmpReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofp); p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth = (uint8_t)(((tmpReg & BMI_FIFO_PIPELINE_DEPTH_MASK) >> BMI_FIFO_PIPELINE_DEPTH_SHIFT) + 1); if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) && (p_FmPortParams->portId != FM_OH_PORT_ID)) { /* Overwrite HC defaults */ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth = DEFAULT_PORT_fifoDeqPipelineDepth_OH; } #ifndef FM_FRAME_END_PARAMS_FOR_OP if (p_FmPort->fmRevInfo.majorRev < 6) p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore = DEFAULT_notSupported; #endif /* !FM_FRAME_END_PARAMS_FOR_OP */ #ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP if (!((p_FmPort->fmRevInfo.majorRev == 4) || (p_FmPort->fmRevInfo.majorRev >= 6))) p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth = DEFAULT_notSupported; #endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */ break; default: XX_Free(p_FmPort->p_FmPortDriverParam); XX_Free(p_FmPort); REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); return NULL; } #ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT if (p_FmPort->fmRevInfo.majorRev == 4) p_FmPort->p_FmPortDriverParam->deqPrefetchOption = (e_FmPortDeqPrefetchOption)DEFAULT_notSupported; #endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */ p_FmPort->imEn = p_FmPortParams->independentModeEnable; if (p_FmPort->imEn) { if ((p_FmPort->portType == e_FM_PORT_TYPE_TX) || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)) p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth = DEFAULT_PORT_fifoDeqPipelineDepth_IM; FmPortConfigIM(p_FmPort, p_FmPortParams); } else { switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX): case (e_FM_PORT_TYPE_RX_10G): /* Initialize FM port parameters for initialization phase only */ memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools, &p_FmPortParams->specificParams.rxParams.extBufPools, sizeof(t_FmExtPools)); p_FmPort->p_FmPortDriverParam->errFqid = p_FmPortParams->specificParams.rxParams.errFqid; p_FmPort->p_FmPortDriverParam->dfltFqid = p_FmPortParams->specificParams.rxParams.dfltFqid; p_FmPort->p_FmPortDriverParam->liodnOffset = 
p_FmPortParams->specificParams.rxParams.liodnOffset; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): case (e_FM_PORT_TYPE_TX): case (e_FM_PORT_TYPE_TX_10G): case (e_FM_PORT_TYPE_OH_HOST_COMMAND): p_FmPort->p_FmPortDriverParam->errFqid = p_FmPortParams->specificParams.nonRxParams.errFqid; p_FmPort->p_FmPortDriverParam->deqSubPortal = (uint8_t)(p_FmPortParams->specificParams.nonRxParams.qmChannel & QMI_DEQ_CFG_SUBPORTAL_MASK); p_FmPort->p_FmPortDriverParam->dfltFqid = p_FmPortParams->specificParams.nonRxParams.dfltFqid; break; default: XX_Free(p_FmPort->p_FmPortDriverParam); XX_Free(p_FmPort); REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); return NULL; } } memset(p_FmPort->name, 0, (sizeof(char)) * MODULE_NAME_SIZE); if (Sprint( p_FmPort->name, "FM-%d-port-%s-%d", FmGetId(p_FmPort->h_Fm), ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) ? "OH" : (p_FmPort->portType == e_FM_PORT_TYPE_RX ? "1g-RX" : (p_FmPort->portType == e_FM_PORT_TYPE_TX ? "1g-TX" : (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G ? "10g-RX" : "10g-TX")))), p_FmPort->portId) == 0) { XX_Free(p_FmPort->p_FmPortDriverParam); XX_Free(p_FmPort); REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); return NULL; } p_FmPort->h_Spinlock = XX_InitSpinlock(); if (!p_FmPort->h_Spinlock) { XX_Free(p_FmPort->p_FmPortDriverParam); XX_Free(p_FmPort); REPORT_ERROR(MAJOR, E_INVALID_STATE, ("XX_InitSpinlock failed")); return NULL; } return p_FmPort; } t_FmPort *rx_port = 0; t_FmPort *tx_port = 0; /**************************************************************************//** @Function FM_PORT_Init @Description Initializes the FM module @Param[in] h_FmPort - FM module descriptor @Return E_OK on success; Error code otherwise. *//***************************************************************************/ t_Error FM_PORT_Init(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_FmPortDriverParam *p_DriverParams; t_Error errCode; t_FmInterModulePortInitParams fmParams; t_FmRevisionInfo revInfo; SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); errCode = FmSpBuildBufferStructure( &p_FmPort->p_FmPortDriverParam->intContext, &p_FmPort->p_FmPortDriverParam->bufferPrefixContent, &p_FmPort->p_FmPortDriverParam->bufMargins, &p_FmPort->bufferOffsets, &p_FmPort->internalBufferOffset); if (errCode != E_OK) RETURN_ERROR(MAJOR, errCode, NO_MSG); #ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 if ((p_FmPort->p_FmPortDriverParam->bcbWorkaround) && (p_FmPort->portType == e_FM_PORT_TYPE_RX)) { p_FmPort->p_FmPortDriverParam->errorsToDiscard |= FM_PORT_FRM_ERR_PHYSICAL; if (!p_FmPort->fifoBufs.num) p_FmPort->fifoBufs.num = DEFAULT_PORT_numOfFifoBufs(p_FmPort->portType)*BMI_FIFO_UNITS; p_FmPort->fifoBufs.num += 4*KILOBYTE; } #endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */ CHECK_INIT_PARAMETERS(p_FmPort, CheckInitParameters); p_DriverParams = p_FmPort->p_FmPortDriverParam; /* Set up flibs port structure */ memset(&p_FmPort->port, 0, sizeof(struct fman_port)); p_FmPort->port.type = (enum fman_port_type)p_FmPort->portType; FM_GetRevision(p_FmPort->h_Fm, &revInfo); p_FmPort->port.fm_rev_maj = revInfo.majorRev; p_FmPort->port.fm_rev_min = revInfo.minorRev; p_FmPort->port.bmi_regs = (union fman_port_bmi_regs *)UINT_TO_PTR(p_DriverParams->baseAddr + BMI_PORT_REGS_OFFSET); p_FmPort->port.qmi_regs = (struct fman_port_qmi_regs *)UINT_TO_PTR(p_DriverParams->baseAddr + QMI_PORT_REGS_OFFSET);
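/*
 * Both register views set up so far - the wrapper pointers assigned in
 * FM_PORT_Config() (p_FmPortBmiRegs/p_FmPortQmiRegs) and the flib pointers
 * above (port.bmi_regs/port.qmi_regs) - map the same per-port window:
 *
 *     baseAddr + BMI_PORT_REGS_OFFSET  ->  BMI registers
 *     baseAddr + QMI_PORT_REGS_OFFSET  ->  QMI registers
 *     baseAddr + PRS_PORT_REGS_OFFSET  ->  soft-parser registers
 */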
p_FmPort->port.ext_pools_num = (uint8_t)((revInfo.majorRev == 4) ? 4 : 8); p_FmPort->port.im_en = p_FmPort->imEn; p_FmPort->p_FmPortPrsRegs = (t_FmPortPrsRegs *)UINT_TO_PTR(p_DriverParams->baseAddr + PRS_PORT_REGS_OFFSET); if (((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) && !p_FmPort->imEn) { /* Call the external Buffer routine which also checks fifo size and updates it if necessary */ /* define external buffer pools and pool depletion*/ errCode = SetExtBufferPools(p_FmPort); if (errCode) RETURN_ERROR(MAJOR, errCode, NO_MSG); /* check if the largest external buffer pool is large enough */ if (p_DriverParams->bufMargins.startMargins + MIN_EXT_BUF_SIZE + p_DriverParams->bufMargins.endMargins > p_FmPort->rxPoolsParams.largestBufSize) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("bufMargins.startMargins (%d) + minimum buf size (64) + bufMargins.endMargins (%d) is larger than maximum external buffer size (%d)", p_DriverParams->bufMargins.startMargins, p_DriverParams->bufMargins.endMargins, p_FmPort->rxPoolsParams.largestBufSize)); } if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) { { #ifdef FM_NO_OP_OBSERVED_POOLS t_FmRevisionInfo revInfo; FM_GetRevision(p_FmPort->h_Fm, &revInfo); if ((revInfo.majorRev == 4) && (p_DriverParams->enBufPoolDepletion)) #endif /* FM_NO_OP_OBSERVED_POOLS */ { /* define external buffer pools */ errCode = SetExtBufferPools(p_FmPort); if (errCode) RETURN_ERROR(MAJOR, errCode, NO_MSG); } } } /************************************************************/ /* Call FM module routine for communicating parameters */ /************************************************************/ memset(&fmParams, 0, sizeof(fmParams)); fmParams.hardwarePortId = p_FmPort->hardwarePortId; fmParams.portType = (e_FmPortType)p_FmPort->portType; fmParams.numOfTasks = (uint8_t)p_FmPort->tasks.num; fmParams.numOfExtraTasks = (uint8_t)p_FmPort->tasks.extra; fmParams.numOfOpenDmas = (uint8_t)p_FmPort->openDmas.num; fmParams.numOfExtraOpenDmas = (uint8_t)p_FmPort->openDmas.extra; if (p_FmPort->fifoBufs.num) { errCode = VerifySizeOfFifo(p_FmPort); if (errCode != E_OK) RETURN_ERROR(MAJOR, errCode, NO_MSG); } fmParams.sizeOfFifo = p_FmPort->fifoBufs.num; fmParams.extraSizeOfFifo = p_FmPort->fifoBufs.extra; fmParams.independentMode = p_FmPort->imEn; fmParams.liodnOffset = p_DriverParams->liodnOffset; fmParams.liodnBase = p_DriverParams->liodnBase; fmParams.deqPipelineDepth = p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth; fmParams.maxFrameLength = p_FmPort->maxFrameLength; #ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) { if (!((p_FmPort->fmRevInfo.majorRev == 4) || (p_FmPort->fmRevInfo.majorRev >= 6))) /* HC ports do not have fifoDeqPipelineDepth, but it is needed only * for deq threshold calculation. 
*/ fmParams.deqPipelineDepth = 2; } #endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */ errCode = FmGetSetPortParams(p_FmPort->h_Fm, &fmParams); if (errCode) RETURN_ERROR(MAJOR, errCode, NO_MSG); /* get params for use in init */ p_FmPort->fmMuramPhysBaseAddr = (uint64_t)((uint64_t)(fmParams.fmMuramPhysBaseAddr.low) | ((uint64_t)(fmParams.fmMuramPhysBaseAddr.high) << 32)); p_FmPort->h_FmMuram = FmGetMuramHandle(p_FmPort->h_Fm); errCode = InitLowLevelDriver(p_FmPort); if (errCode != E_OK) RETURN_ERROR(MAJOR, errCode, NO_MSG); FmPortDriverParamFree(p_FmPort); #if (DPAA_VERSION >= 11) if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) { t_FmPcdCtrlParamsPage *p_ParamsPage; FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE, (void**)&p_ParamsPage); ASSERT_COND(p_ParamsPage); WRITE_UINT32(p_ParamsPage->misc, FM_CTL_PARAMS_PAGE_ALWAYS_ON); #ifdef FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675 if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) { WRITE_UINT32( p_ParamsPage->misc, (GET_UINT32(p_ParamsPage->misc) | FM_CTL_PARAMS_PAGE_OP_FIX_EN)); WRITE_UINT32( p_ParamsPage->discardMask, GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm)); } #endif /* FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675 */ #ifdef FM_ERROR_VSP_NO_MATCH_SW006 if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) WRITE_UINT32( p_ParamsPage->errorsDiscardMask, (GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm) | GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsem))); else WRITE_UINT32( p_ParamsPage->errorsDiscardMask, (GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm) | GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsem))); #endif /* FM_ERROR_VSP_NO_MATCH_SW006 */ } #endif /* (DPAA_VERSION >= 11) */ if (p_FmPort->deepSleepVars.autoResMaxSizes) FmPortConfigAutoResForDeepSleepSupport1(p_FmPort); return E_OK; } /**************************************************************************//** @Function FM_PORT_Free @Description Frees all resources that were assigned to FM module. Calling this routine invalidates the descriptor. @Param[in] h_FmPort - FM module descriptor @Return E_OK on success; Error code otherwise. *//***************************************************************************/ t_Error FM_PORT_Free(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_FmInterModulePortFreeParams fmParams; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); if (p_FmPort->pcdEngines) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("Trying to free a port with PCD. 
FM_PORT_DeletePCD must be called first.")); if (p_FmPort->enabled) { if (FM_PORT_Disable(p_FmPort) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM_PORT_Disable FAILED")); } if (p_FmPort->imEn) FmPortImFree(p_FmPort); FmPortDriverParamFree(p_FmPort); memset(&fmParams, 0, sizeof(fmParams)); fmParams.hardwarePortId = p_FmPort->hardwarePortId; fmParams.portType = (e_FmPortType)p_FmPort->portType; fmParams.deqPipelineDepth = p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth; FmFreePortParams(p_FmPort->h_Fm, &fmParams); #if (DPAA_VERSION >= 11) if (FmVSPFreeForPort(p_FmPort->h_Fm, p_FmPort->portType, p_FmPort->portId) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("VSP free of port FAILED")); if (p_FmPort->p_ParamsPage) FM_MURAM_FreeMem(p_FmPort->h_FmMuram, p_FmPort->p_ParamsPage); #endif /* (DPAA_VERSION >= 11) */ if (p_FmPort->h_Spinlock) XX_FreeSpinlock(p_FmPort->h_Spinlock); XX_Free(p_FmPort); return E_OK; } /*************************************************/ /* API Advanced Init unit functions */ /*************************************************/ t_Error FM_PORT_ConfigNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_OpenDmas) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->setNumOfOpenDmas = TRUE; memcpy(&p_FmPort->openDmas, p_OpenDmas, sizeof(t_FmPortRsrc)); return E_OK; } t_Error FM_PORT_ConfigNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); memcpy(&p_FmPort->tasks, p_NumOfTasks, sizeof(t_FmPortRsrc)); p_FmPort->p_FmPortDriverParam->setNumOfTasks = TRUE; return E_OK; } t_Error FM_PORT_ConfigSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->setSizeOfFifo = TRUE; memcpy(&p_FmPort->fifoBufs, p_SizeOfFifo, sizeof(t_FmPortRsrc)); return E_OK; } t_Error FM_PORT_ConfigDeqHighPriority(t_Handle h_FmPort, bool highPri) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Rx ports")); p_FmPort->p_FmPortDriverParam->dfltCfg.deq_high_pri = highPri; return E_OK; } t_Error FM_PORT_ConfigDeqType(t_Handle h_FmPort, e_FmPortDeqType deqType) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Rx ports")); p_FmPort->p_FmPortDriverParam->dfltCfg.deq_type = (enum fman_port_deq_type)deqType; return E_OK; } t_Error FM_PORT_ConfigDeqPrefetchOption( t_Handle h_FmPort, e_FmPortDeqPrefetchOption deqPrefetchOption) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType
== e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Rx ports")); p_FmPort->p_FmPortDriverParam->dfltCfg.deq_prefetch_opt = (enum fman_port_deq_prefetch)deqPrefetchOption; return E_OK; } t_Error FM_PORT_ConfigBackupPools(t_Handle h_FmPort, t_FmBackupBmPools *p_BackupBmPools) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); p_FmPort->p_FmPortDriverParam->p_BackupBmPools = (t_FmBackupBmPools *)XX_Malloc(sizeof(t_FmBackupBmPools)); if (!p_FmPort->p_FmPortDriverParam->p_BackupBmPools) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_BackupBmPools allocation failed")); memcpy(p_FmPort->p_FmPortDriverParam->p_BackupBmPools, p_BackupBmPools, sizeof(t_FmBackupBmPools)); return E_OK; } t_Error FM_PORT_ConfigDeqByteCnt(t_Handle h_FmPort, uint16_t deqByteCnt) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Rx ports")); p_FmPort->p_FmPortDriverParam->dfltCfg.deq_byte_cnt = deqByteCnt; return E_OK; } t_Error FM_PORT_ConfigBufferPrefixContent( t_Handle h_FmPort, t_FmBufferPrefixContent *p_FmBufferPrefixContent) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); memcpy(&p_FmPort->p_FmPortDriverParam->bufferPrefixContent, p_FmBufferPrefixContent, sizeof(t_FmBufferPrefixContent)); /* if dataAlign was not initialized by user, we return to driver's default */ if (!p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign) p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign = DEFAULT_PORT_bufferPrefixContent_dataAlign; return E_OK; } t_Error FM_PORT_ConfigCheksumLastBytesIgnore(t_Handle h_FmPort, uint8_t checksumLastBytesIgnore) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->dfltCfg.checksum_bytes_ignore = checksumLastBytesIgnore; return E_OK; } t_Error FM_PORT_ConfigCutBytesFromEnd(t_Handle h_FmPort, uint8_t cutBytesFromEnd) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); p_FmPort->p_FmPortDriverParam->dfltCfg.rx_cut_end_bytes = cutBytesFromEnd; return E_OK; } t_Error FM_PORT_ConfigPoolDepletion(t_Handle h_FmPort, t_FmBufPoolDepletion *p_BufPoolDepletion) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports 
only")); p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = TRUE; memcpy(&p_FmPort->p_FmPortDriverParam->bufPoolDepletion, p_BufPoolDepletion, sizeof(t_FmBufPoolDepletion)); return E_OK; } t_Error FM_PORT_ConfigObservedPoolDepletion( t_Handle h_FmPort, t_FmPortObservedBufPoolDepletion *p_FmPortObservedBufPoolDepletion) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for OP ports only")); p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = TRUE; memcpy(&p_FmPort->p_FmPortDriverParam->bufPoolDepletion, &p_FmPortObservedBufPoolDepletion->poolDepletionParams, sizeof(t_FmBufPoolDepletion)); memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools, &p_FmPortObservedBufPoolDepletion->poolsParams, sizeof(t_FmExtPools)); return E_OK; } t_Error FM_PORT_ConfigExtBufPools(t_Handle h_FmPort, t_FmExtPools *p_FmExtPools) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for OP ports only")); memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools, p_FmExtPools, sizeof(t_FmExtPools)); return E_OK; } t_Error FM_PORT_ConfigDontReleaseTxBufToBM(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_TX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Tx ports only")); p_FmPort->p_FmPortDriverParam->dontReleaseBuf = TRUE; return E_OK; } t_Error FM_PORT_ConfigDfltColor(t_Handle h_FmPort, e_FmPortColor color) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->dfltCfg.color = (enum fman_port_color)color; return E_OK; } t_Error FM_PORT_ConfigSyncReq(t_Handle h_FmPort, bool syncReq) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_TX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Not available for Tx ports")); p_FmPort->p_FmPortDriverParam->dfltCfg.sync_req = syncReq; return E_OK; } t_Error FM_PORT_ConfigFrmDiscardOverride(t_Handle h_FmPort, bool override) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_TX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Not available for Tx ports")); p_FmPort->p_FmPortDriverParam->dfltCfg.discard_override = override; return E_OK; } t_Error FM_PORT_ConfigErrorsToDiscard(t_Handle h_FmPort, fmPortFrameErrSelect_t errs) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType 
!= e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); p_FmPort->p_FmPortDriverParam->errorsToDiscard = errs; return E_OK; } t_Error FM_PORT_ConfigDmaSwapData(t_Handle h_FmPort, e_FmDmaSwapOption swapData) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->dfltCfg.dma_swap_data = (enum fman_port_dma_swap)swapData; return E_OK; } t_Error FM_PORT_ConfigDmaIcCacheAttr(t_Handle h_FmPort, e_FmDmaCacheOption intContextCacheAttr) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->dfltCfg.dma_ic_stash_on = (bool)(intContextCacheAttr == e_FM_DMA_STASH); return E_OK; } t_Error FM_PORT_ConfigDmaHdrAttr(t_Handle h_FmPort, e_FmDmaCacheOption headerCacheAttr) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->dfltCfg.dma_header_stash_on = (bool)(headerCacheAttr == e_FM_DMA_STASH); return E_OK; } t_Error FM_PORT_ConfigDmaScatterGatherAttr( t_Handle h_FmPort, e_FmDmaCacheOption scatterGatherCacheAttr) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->dfltCfg.dma_sg_stash_on = (bool)(scatterGatherCacheAttr == e_FM_DMA_STASH); return E_OK; } t_Error FM_PORT_ConfigDmaWriteOptimize(t_Handle h_FmPort, bool optimize) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_TX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Not available for Tx ports")); p_FmPort->p_FmPortDriverParam->dfltCfg.dma_write_optimize = optimize; return E_OK; } #if (DPAA_VERSION >= 11) t_Error FM_PORT_ConfigNoScatherGather(t_Handle h_FmPort, bool noScatherGather) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; UNUSED(noScatherGather); UNUSED(p_FmPort); SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->noScatherGather = noScatherGather; return E_OK; } #endif /* (DPAA_VERSION >= 11) */ t_Error FM_PORT_ConfigForwardReuseIntContext(t_Handle h_FmPort, bool forwardReuse) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); p_FmPort->p_FmPortDriverParam->forwardReuseIntContext = forwardReuse; return E_OK; } t_Error FM_PORT_ConfigMaxFrameLength(t_Handle h_FmPort, uint16_t length) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); 
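/*
 * As in every FM_PORT_Config*() routine, the second sanity check restricts
 * the call to the window between FM_PORT_Config() and FM_PORT_Init(), while
 * p_FmPortDriverParam is still allocated.  A sketch of the intended call
 * order (the frame length value is hypothetical):
 *
 *     t_Handle h_Port = FM_PORT_Config(&portParams);
 *     FM_PORT_ConfigMaxFrameLength(h_Port, 9600);
 *     FM_PORT_Init(h_Port);
 */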
p_FmPort->maxFrameLength = length; return E_OK; } #ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 t_Error FM_PORT_ConfigBCBWorkaround(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); p_FmPort->p_FmPortDriverParam->bcbWorkaround = TRUE; return E_OK; } #endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */ /****************************************************/ /* Hidden-DEBUG Only API */ /****************************************************/ t_Error FM_PORT_ConfigTxFifoMinFillLevel(t_Handle h_FmPort, uint32_t minFillLevel) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_TX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Tx ports only")); p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_min_level = minFillLevel; return E_OK; } t_Error FM_PORT_ConfigFifoDeqPipelineDepth(t_Handle h_FmPort, uint8_t deqPipelineDepth) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Not available for Rx ports")); if (p_FmPort->imEn) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Not available for IM ports!")); p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth = deqPipelineDepth; return E_OK; } t_Error FM_PORT_ConfigTxFifoLowComfLevel(t_Handle h_FmPort, uint32_t fifoLowComfLevel) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_TX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Tx ports only")); p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_low_comf_level = fifoLowComfLevel; return E_OK; } t_Error FM_PORT_ConfigRxFifoThreshold(t_Handle h_FmPort, uint32_t fifoThreshold) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); p_FmPort->p_FmPortDriverParam->dfltCfg.rx_fifo_thr = fifoThreshold; return E_OK; } t_Error FM_PORT_ConfigRxFifoPriElevationLevel(t_Handle h_FmPort, uint32_t priElevationLevel) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); p_FmPort->p_FmPortDriverParam->dfltCfg.rx_pri_elevation = priElevationLevel; return E_OK; } /****************************************************/ /* API Run-time Control unit functions */ /****************************************************/ t_Error FM_PORT_SetNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfOpenDmas) { t_FmPort 
*p_FmPort = (t_FmPort*)h_FmPort; t_Error err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((!p_NumOfOpenDmas->num) || (p_NumOfOpenDmas->num > MAX_NUM_OF_DMAS)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("openDmas-num can't be larger than %d", MAX_NUM_OF_DMAS)); if (p_NumOfOpenDmas->extra > MAX_NUM_OF_EXTRA_DMAS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("openDmas-extra can't be larger than %d", MAX_NUM_OF_EXTRA_DMAS)); err = FmSetNumOfOpenDmas(p_FmPort->h_Fm, p_FmPort->hardwarePortId, (uint8_t*)&p_NumOfOpenDmas->num, (uint8_t*)&p_NumOfOpenDmas->extra, FALSE); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); memcpy(&p_FmPort->openDmas, p_NumOfOpenDmas, sizeof(t_FmPortRsrc)); return E_OK; } t_Error FM_PORT_SetNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); /* only driver uses host command port, so ASSERT rather than RETURN_ERROR */ ASSERT_COND(p_FmPort->portType != e_FM_PORT_TYPE_OH_HOST_COMMAND); if ((!p_NumOfTasks->num) || (p_NumOfTasks->num > MAX_NUM_OF_TASKS)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("NumOfTasks-num can't be larger than %d", MAX_NUM_OF_TASKS)); if (p_NumOfTasks->extra > MAX_NUM_OF_EXTRA_TASKS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("NumOfTasks-extra can't be larger than %d", MAX_NUM_OF_EXTRA_TASKS)); err = FmSetNumOfTasks(p_FmPort->h_Fm, p_FmPort->hardwarePortId, (uint8_t*)&p_NumOfTasks->num, (uint8_t*)&p_NumOfTasks->extra, FALSE); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); /* update driver's struct */ memcpy(&p_FmPort->tasks, p_NumOfTasks, sizeof(t_FmPortRsrc)); return E_OK; } t_Error FM_PORT_SetSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if (!p_SizeOfFifo->num || (p_SizeOfFifo->num > MAX_PORT_FIFO_SIZE)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("SizeOfFifo-num has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE)); if (p_SizeOfFifo->num % BMI_FIFO_UNITS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("SizeOfFifo-num has to be divisible by %d", BMI_FIFO_UNITS)); if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) { /* extra FIFO size (allowed only to Rx ports) */ if (p_SizeOfFifo->extra % BMI_FIFO_UNITS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("SizeOfFifo-extra has to be divisible by %d", BMI_FIFO_UNITS)); } else if (p_SizeOfFifo->extra) RETURN_ERROR(MAJOR, E_INVALID_VALUE, (" No SizeOfFifo-extra for non Rx ports")); memcpy(&p_FmPort->fifoBufs, p_SizeOfFifo, sizeof(t_FmPortRsrc)); /* we do not change user's parameter */ err = VerifySizeOfFifo(p_FmPort); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); err = FmSetSizeOfFifo(p_FmPort->h_Fm, p_FmPort->hardwarePortId, &p_SizeOfFifo->num, &p_SizeOfFifo->extra, FALSE); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); return E_OK; } uint32_t FM_PORT_GetBufferDataOffset(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0); SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0); return p_FmPort->bufferOffsets.dataOffset; } uint8_t * FM_PORT_GetBufferICInfo(t_Handle h_FmPort, char *p_Data) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; 
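/*
 * This and the other FM_PORT_GetBuffer*() accessors below translate an
 * offset recorded by FmSpBuildBufferStructure() during FM_PORT_Init() into
 * a pointer inside the caller's frame buffer; ILLEGAL_BASE marks a prefix
 * field that was never configured, for which NULL is returned.  Sketch
 * (p_Data assumed to point to a received frame buffer):
 *
 *     uint64_t *p_Ts = FM_PORT_GetBufferTimeStamp(h_FmPort, p_Data);
 *     ... p_Ts is NULL unless passTimeStamp was configured ...
 */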
SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, NULL); if (p_FmPort->bufferOffsets.pcdInfoOffset == ILLEGAL_BASE) return NULL; return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.pcdInfoOffset); } t_FmPrsResult * FM_PORT_GetBufferPrsResult(t_Handle h_FmPort, char *p_Data) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, NULL); if (p_FmPort->bufferOffsets.prsResultOffset == ILLEGAL_BASE) return NULL; return (t_FmPrsResult *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.prsResultOffset); } uint64_t * FM_PORT_GetBufferTimeStamp(t_Handle h_FmPort, char *p_Data) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, NULL); if (p_FmPort->bufferOffsets.timeStampOffset == ILLEGAL_BASE) return NULL; return (uint64_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.timeStampOffset); } uint8_t * FM_PORT_GetBufferHashResult(t_Handle h_FmPort, char *p_Data) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, NULL); if (p_FmPort->bufferOffsets.hashResultOffset == ILLEGAL_BASE) return NULL; return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.hashResultOffset); } t_Error FM_PORT_Disable(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if (p_FmPort->imEn) FmPortImDisable(p_FmPort); err = fman_port_disable(&p_FmPort->port); if (err == -EBUSY) { DBG(WARNING, ("%s: BMI or QMI is Busy. Port forced down", p_FmPort->name)); } else if (err != 0) { RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_disable")); } p_FmPort->enabled = FALSE; return E_OK; } t_Error FM_PORT_Enable(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); /* Used by FM_PORT_Free routine as indication if to disable port. Thus set it to TRUE prior to enabling itself. This way if part of enable process fails there will be still things to disable during Free. For example, if BMI enable succeeded but QMI failed, still BMI needs to be disabled by Free. 
*/ p_FmPort->enabled = TRUE; if (p_FmPort->imEn) FmPortImEnable(p_FmPort); err = fman_port_enable(&p_FmPort->port); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_enable")); return E_OK; } t_Error FM_PORT_SetRateLimit(t_Handle h_FmPort, t_FmPortRateLimit *p_RateLimit) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; uint8_t factor, countUnitBit; uint16_t baseGran; struct fman_port_rate_limiter params; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_TX_10G): case (e_FM_PORT_TYPE_TX): baseGran = BMI_RATE_LIMIT_GRAN_TX; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): baseGran = BMI_RATE_LIMIT_GRAN_OP; break; default: RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Tx and Offline parsing ports only")); } countUnitBit = (uint8_t)FmGetTimeStampScale(p_FmPort->h_Fm); /* TimeStamp per nano seconds units */ /* normally, we use 1 usec as the reference count */ factor = 1; /* if ratelimit is too small for a 1usec factor, multiply the factor */ while (p_RateLimit->rateLimit < baseGran / factor) { if (countUnitBit == 31) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Rate limit is too small")); countUnitBit++; factor <<= 1; } /* if ratelimit is too large for a 1usec factor, it is also larger than max rate*/ if (p_RateLimit->rateLimit > ((uint32_t)baseGran * (1 << 10) * (uint32_t)factor)) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Rate limit is too large")); if (!p_RateLimit->maxBurstSize || (p_RateLimit->maxBurstSize > BMI_RATE_LIMIT_MAX_BURST_SIZE)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("maxBurstSize must be between 1K and %dk", BMI_RATE_LIMIT_MAX_BURST_SIZE)); params.count_1micro_bit = (uint8_t)FmGetTimeStampScale(p_FmPort->h_Fm); params.high_burst_size_gran = FALSE; params.burst_size = p_RateLimit->maxBurstSize; params.rate = p_RateLimit->rateLimit; params.rate_factor = E_FMAN_PORT_RATE_DOWN_NONE; if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) { #ifndef FM_NO_ADVANCED_RATE_LIMITER if ((p_FmPort->fmRevInfo.majorRev == 4) || (p_FmPort->fmRevInfo.majorRev >= 6)) { params.high_burst_size_gran = TRUE; } else #endif /* ! 
FM_NO_ADVANCED_RATE_LIMITER */ { if (p_RateLimit->rateLimitDivider != e_FM_PORT_DUAL_RATE_LIMITER_NONE) RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PORT_ConfigDualRateLimitScaleDown")); if (p_RateLimit->maxBurstSize % 1000) { p_RateLimit->maxBurstSize = (uint16_t)((p_RateLimit->maxBurstSize / 1000) + 1); DBG(WARNING, ("rateLimit.maxBurstSize rounded up to %d", (p_RateLimit->maxBurstSize/1000+1)*1000)); } else p_RateLimit->maxBurstSize = (uint16_t)(p_RateLimit->maxBurstSize / 1000); } params.rate_factor = (enum fman_port_rate_limiter_scale_down)p_RateLimit->rateLimitDivider; params.burst_size = p_RateLimit->maxBurstSize; } err = fman_port_set_rate_limiter(&p_FmPort->port, &params); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_rate_limiter")); return E_OK; } t_Error FM_PORT_DeleteRateLimit(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Tx and Offline parsing ports only")); err = fman_port_delete_rate_limiter(&p_FmPort->port); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_delete_rate_limiter")); return E_OK; } t_Error FM_PORT_SetPfcPrioritiesMappingToQmanWQ(t_Handle h_FmPort, uint8_t prio, uint8_t wq) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; uint32_t tmpReg; uint32_t wqTmpReg; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if ((p_FmPort->portType != e_FM_PORT_TYPE_TX) && (p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("PFC mapping is available for Tx ports only")); if (prio > 7) RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("PFC priority (%d) is out of range (0-7)", prio)); if (wq > 7) RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("WQ (%d) is out of range (0-7)", wq)); tmpReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tpfcm[0]); tmpReg &= ~(0xf << ((7 - prio) * 4)); wqTmpReg = ((uint32_t)wq << ((7 - prio) * 4)); tmpReg |= wqTmpReg; WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tpfcm[0], tmpReg); return E_OK; } t_Error FM_PORT_SetFrameQueueCounters(t_Handle h_FmPort, bool enable) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); fman_port_set_queue_cnt_mode(&p_FmPort->port, enable); return E_OK; } t_Error FM_PORT_SetPerformanceCounters(t_Handle h_FmPort, bool enable) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); err = fman_port_set_perf_cnt_mode(&p_FmPort->port, enable); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_perf_cnt_mode")); return E_OK; } t_Error FM_PORT_SetPerformanceCountersParams( t_Handle h_FmPort, t_FmPortPerformanceCnt *p_FmPortPerformanceCnt) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; struct fman_port_perf_cnt_params params; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); /* check parameters */ if (!p_FmPortPerformanceCnt->taskCompVal || (p_FmPortPerformanceCnt->taskCompVal > p_FmPort->tasks.num)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("taskCompVal (%d)
has to be in the range of 1 - %d (current value)!", p_FmPortPerformanceCnt->taskCompVal, p_FmPort->tasks.num)); if (!p_FmPortPerformanceCnt->dmaCompVal || (p_FmPortPerformanceCnt->dmaCompVal > p_FmPort->openDmas.num)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("dmaCompVal (%d) has to be in the range of 1 - %d (current value)!", p_FmPortPerformanceCnt->dmaCompVal, p_FmPort->openDmas.num)); if (!p_FmPortPerformanceCnt->fifoCompVal || (p_FmPortPerformanceCnt->fifoCompVal > p_FmPort->fifoBufs.num)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fifoCompVal (%d) has to be in the range of 256 - %d (current value)!", p_FmPortPerformanceCnt->fifoCompVal, p_FmPort->fifoBufs.num)); if (p_FmPortPerformanceCnt->fifoCompVal % BMI_FIFO_UNITS) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fifoCompVal (%d) has to be divisible by %d", p_FmPortPerformanceCnt->fifoCompVal, BMI_FIFO_UNITS)); switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): if (!p_FmPortPerformanceCnt->queueCompVal || (p_FmPortPerformanceCnt->queueCompVal > MAX_PERFORMANCE_RX_QUEUE_COMP)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("performanceCnt.queueCompVal for Rx has to be in the range of 1 - %d", MAX_PERFORMANCE_RX_QUEUE_COMP)); break; case (e_FM_PORT_TYPE_TX_10G): case (e_FM_PORT_TYPE_TX): if (!p_FmPortPerformanceCnt->queueCompVal || (p_FmPortPerformanceCnt->queueCompVal > MAX_PERFORMANCE_TX_QUEUE_COMP)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("performanceCnt.queueCompVal for Tx has to be in the range of 1 - %d", MAX_PERFORMANCE_TX_QUEUE_COMP)); break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): case (e_FM_PORT_TYPE_OH_HOST_COMMAND): if (p_FmPortPerformanceCnt->queueCompVal) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("performanceCnt.queueCompVal is not relevant for H/O ports.")); break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); } params.task_val = p_FmPortPerformanceCnt->taskCompVal; params.queue_val = p_FmPortPerformanceCnt->queueCompVal; params.dma_val = p_FmPortPerformanceCnt->dmaCompVal; params.fifo_val = p_FmPortPerformanceCnt->fifoCompVal; err = fman_port_set_perf_cnt_params(&p_FmPort->port, ¶ms); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_perf_cnt_params")); return E_OK; } t_Error FM_PORT_AnalyzePerformanceParams(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_FmPortPerformanceCnt currParams, savedParams; t_Error err; bool underTest, failed = FALSE; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); XX_Print("Analyzing Performance parameters for port (type %d, id%d)\n", p_FmPort->portType, p_FmPort->portId); currParams.taskCompVal = (uint8_t)p_FmPort->tasks.num; if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) currParams.queueCompVal = 0; else currParams.queueCompVal = 1; currParams.dmaCompVal = (uint8_t)p_FmPort->openDmas.num; currParams.fifoCompVal = p_FmPort->fifoBufs.num; FM_PORT_SetPerformanceCounters(p_FmPort, FALSE); ClearPerfCnts(p_FmPort); if ((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &currParams)) != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); FM_PORT_SetPerformanceCounters(p_FmPort, TRUE); XX_UDelay(1000000); FM_PORT_SetPerformanceCounters(p_FmPort, FALSE); if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL)) { XX_Print( "Max num of defined port tasks (%d) utilized - Please enlarge\n", p_FmPort->tasks.num); failed = TRUE; } if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL)) { XX_Print( "Max num of defined port openDmas (%d) utilized - 
Please enlarge\n", p_FmPort->openDmas.num); failed = TRUE; } if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL)) { XX_Print( "Max size of defined port fifo (%d) utilized - Please enlarge\n", p_FmPort->fifoBufs.num); failed = TRUE; } if (failed) RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); memset(&savedParams, 0, sizeof(savedParams)); while (TRUE) { underTest = FALSE; if ((currParams.taskCompVal != 1) && !savedParams.taskCompVal) { currParams.taskCompVal--; underTest = TRUE; } if ((currParams.dmaCompVal != 1) && !savedParams.dmaCompVal) { currParams.dmaCompVal--; underTest = TRUE; } if ((currParams.fifoCompVal != BMI_FIFO_UNITS) && !savedParams.fifoCompVal) { currParams.fifoCompVal -= BMI_FIFO_UNITS; underTest = TRUE; } if (!underTest) break; ClearPerfCnts(p_FmPort); if ((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &currParams)) != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); FM_PORT_SetPerformanceCounters(p_FmPort, TRUE); XX_UDelay(1000000); FM_PORT_SetPerformanceCounters(p_FmPort, FALSE); if (!savedParams.taskCompVal && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL)) savedParams.taskCompVal = (uint8_t)(currParams.taskCompVal + 2); if (!savedParams.dmaCompVal && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL)) savedParams.dmaCompVal = (uint8_t)(currParams.dmaCompVal + 2); if (!savedParams.fifoCompVal && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL)) savedParams.fifoCompVal = currParams.fifoCompVal + (2 * BMI_FIFO_UNITS); } XX_Print("best vals: tasks %d, dmas %d, fifos %d\n", savedParams.taskCompVal, savedParams.dmaCompVal, savedParams.fifoCompVal); return E_OK; } t_Error FM_PORT_SetStatisticsCounters(t_Handle h_FmPort, bool enable) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); err = fman_port_set_stats_cnt_mode(&p_FmPort->port, enable); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_stats_cnt_mode")); return E_OK; } t_Error FM_PORT_SetErrorsRoute(t_Handle h_FmPort, fmPortFrameErrSelect_t errs) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; volatile uint32_t *p_ErrDiscard = NULL; int err; UNUSED(p_ErrDiscard); err = fman_port_set_err_mask(&p_FmPort->port, (uint32_t)errs); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_err_mask")); #ifdef FM_ERROR_VSP_NO_MATCH_SW006 if (p_FmPort->fmRevInfo.majorRev >= 6) { t_FmPcdCtrlParamsPage *p_ParamsPage; FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE, (void**)&p_ParamsPage); ASSERT_COND(p_ParamsPage); switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_ErrDiscard = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_ErrDiscard = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm; break; default: RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); } WRITE_UINT32(p_ParamsPage->errorsDiscardMask, GET_UINT32(*p_ErrDiscard) | errs); } #endif /* FM_ERROR_VSP_NO_MATCH_SW006 */ return E_OK; } t_Error FM_PORT_SetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, bool enable) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(poolIdp_FmPortDriverParam, E_INVALID_STATE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for 
Rx ports only")); err = fman_port_set_bpool_cnt_mode(&p_FmPort->port, poolId, enable); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_bpool_cnt_mode")); return E_OK; } t_Error FM_PORT_GetBmiCounters(t_Handle h_FmPort, t_FmPortBmiStats *p_BmiStats) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)){ p_BmiStats->cntCycle = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_CYCLE); /* fmbm_rccn */ p_BmiStats->cntTaskUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL); /* fmbm_rtuc */ p_BmiStats->cntQueueUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL); /* fmbm_rrquc */ p_BmiStats->cntDmaUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL); /* fmbm_rduc */ p_BmiStats->cntFifoUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL); /* fmbm_rfuc */ p_BmiStats->cntRxPauseActivation = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION); /* fmbm_rpac */ p_BmiStats->cntFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME); /* fmbm_rfrc */ p_BmiStats->cntDiscardFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME); /* fmbm_rfdc */ p_BmiStats->cntDeallocBuf = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF); /* fmbm_rbdc */ p_BmiStats->cntRxBadFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_BAD_FRAME); /* fmbm_rfbc */ p_BmiStats->cntRxLargeFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LARGE_FRAME); /* fmbm_rlfc */ p_BmiStats->cntRxFilterFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_FILTER_FRAME); /* fmbm_rffc */ p_BmiStats->cntRxListDmaErr = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR); /* fmbm_rfldec */ p_BmiStats->cntRxOutOfBuffersDiscard = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD); /* fmbm_rodc */ p_BmiStats->cntWredDiscard = 0; p_BmiStats->cntLengthErr = 0; p_BmiStats->cntUnsupportedFormat = 0; } else if ((p_FmPort->portType == e_FM_PORT_TYPE_TX) || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)){ p_BmiStats->cntCycle = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_CYCLE); /* fmbm_tccn */ p_BmiStats->cntTaskUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL); /* fmbm_ttuc */ p_BmiStats->cntQueueUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL); /* fmbm_ttcquc */ p_BmiStats->cntDmaUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL); /* fmbm_tduc */ p_BmiStats->cntFifoUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL); /* fmbm_tfuc */ p_BmiStats->cntRxPauseActivation = 0; p_BmiStats->cntFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME); /* fmbm_tfrc */ p_BmiStats->cntDiscardFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME); /* fmbm_tfdc */ p_BmiStats->cntDeallocBuf = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF); /* fmbm_tbdc */ p_BmiStats->cntRxBadFrame = 0; p_BmiStats->cntRxLargeFrame = 0; p_BmiStats->cntRxFilterFrame = 0; p_BmiStats->cntRxListDmaErr = 0; p_BmiStats->cntRxOutOfBuffersDiscard = 0; p_BmiStats->cntWredDiscard = 0; p_BmiStats->cntLengthErr = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_LENGTH_ERR); /* fmbm_tfledc */ p_BmiStats->cntUnsupportedFormat = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT); /* fmbm_tfufdc */ } else if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) { p_BmiStats->cntCycle = FM_PORT_GetCounter(h_FmPort, 
e_FM_PORT_COUNTERS_CYCLE); /* fmbm_occn */ p_BmiStats->cntTaskUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL); /* fmbm_otuc */ p_BmiStats->cntQueueUtil = 0; p_BmiStats->cntDmaUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL); /* fmbm_oduc */ p_BmiStats->cntFifoUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL); /* fmbm_ofuc*/ p_BmiStats->cntRxPauseActivation = 0; p_BmiStats->cntFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME); /* fmbm_ofrc */ p_BmiStats->cntDiscardFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME); /* fmbm_ofdc */ p_BmiStats->cntDeallocBuf = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF); /* fmbm_obdc*/ p_BmiStats->cntRxBadFrame = 0; p_BmiStats->cntRxLargeFrame = 0; p_BmiStats->cntRxFilterFrame = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_FILTER_FRAME); /* fmbm_offc */ p_BmiStats->cntRxListDmaErr = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR); /* fmbm_ofldec */ p_BmiStats->cntRxOutOfBuffersDiscard = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD); /* fmbm_rodc */ p_BmiStats->cntWredDiscard = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_WRED_DISCARD); /* fmbm_ofwdc */ p_BmiStats->cntLengthErr = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_LENGTH_ERR); /* fmbm_ofledc */ p_BmiStats->cntUnsupportedFormat = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT); /* fmbm_ofufdc */ } return E_OK; } uint32_t FM_PORT_GetCounter(t_Handle h_FmPort, e_FmPortCounters counter) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; bool bmiCounter = FALSE; enum fman_port_stats_counters statsType; enum fman_port_perf_counters perfType; enum fman_port_qmi_counters queueType; bool isStats; t_Error errCode; SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); switch (counter) { case (e_FM_PORT_COUNTERS_DEQ_TOTAL): case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT): case (e_FM_PORT_COUNTERS_DEQ_CONFIRM): /* check that counter is available for the port type */ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) { REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for Rx ports")); return 0; } bmiCounter = FALSE; break; case (e_FM_PORT_COUNTERS_ENQ_TOTAL): bmiCounter = FALSE; break; default: /* BMI counters (or error - will be checked in BMI routine )*/ bmiCounter = TRUE; break; } if (bmiCounter) { errCode = BmiPortCheckAndGetCounterType(p_FmPort, counter, &statsType, &perfType, &isStats); if (errCode != E_OK) { REPORT_ERROR(MINOR, errCode, NO_MSG); return 0; } if (isStats) return fman_port_get_stats_counter(&p_FmPort->port, statsType); else return fman_port_get_perf_counter(&p_FmPort->port, perfType); } else /* QMI counter */ { /* check that counters are enabled */ if (!(GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pnc) & QMI_PORT_CFG_EN_COUNTERS)) { REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); return 0; } /* Set counter */ switch (counter) { case (e_FM_PORT_COUNTERS_ENQ_TOTAL): queueType = E_FMAN_PORT_ENQ_TOTAL; break; case (e_FM_PORT_COUNTERS_DEQ_TOTAL): queueType = E_FMAN_PORT_DEQ_TOTAL; break; case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT): queueType = E_FMAN_PORT_DEQ_FROM_DFLT; break; case (e_FM_PORT_COUNTERS_DEQ_CONFIRM): queueType = E_FMAN_PORT_DEQ_CONFIRM; break; default: REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available")); return 0; } return 
fman_port_get_qmi_counter(&p_FmPort->port, queueType); } return 0; } t_Error FM_PORT_ModifyCounter(t_Handle h_FmPort, e_FmPortCounters counter, uint32_t value) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; bool bmiCounter = FALSE; enum fman_port_stats_counters statsType; enum fman_port_perf_counters perfType; enum fman_port_qmi_counters queueType; bool isStats; t_Error errCode; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); switch (counter) { case (e_FM_PORT_COUNTERS_DEQ_TOTAL): case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT): case (e_FM_PORT_COUNTERS_DEQ_CONFIRM): /* check that counter is available for the port type */ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) RETURN_ERROR( MINOR, E_INVALID_STATE, ("Requested counter is not available for Rx ports")); case (e_FM_PORT_COUNTERS_ENQ_TOTAL): bmiCounter = FALSE; break; default: /* BMI counters (or error - will be checked in BMI routine )*/ bmiCounter = TRUE; break; } if (bmiCounter) { errCode = BmiPortCheckAndGetCounterType(p_FmPort, counter, &statsType, &perfType, &isStats); if (errCode != E_OK) { RETURN_ERROR(MINOR, errCode, NO_MSG); } if (isStats) fman_port_set_stats_counter(&p_FmPort->port, statsType, value); else fman_port_set_perf_counter(&p_FmPort->port, perfType, value); } else /* QMI counter */ { /* check that counters are enabled */ if (!(GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pnc) & QMI_PORT_CFG_EN_COUNTERS)) { RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); } /* Set counter */ switch (counter) { case (e_FM_PORT_COUNTERS_ENQ_TOTAL): queueType = E_FMAN_PORT_ENQ_TOTAL; break; case (e_FM_PORT_COUNTERS_DEQ_TOTAL): queueType = E_FMAN_PORT_DEQ_TOTAL; break; case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT): queueType = E_FMAN_PORT_DEQ_FROM_DFLT; break; case (e_FM_PORT_COUNTERS_DEQ_CONFIRM): queueType = E_FMAN_PORT_DEQ_CONFIRM; break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Requested counter is not available")); } fman_port_set_qmi_counter(&p_FmPort->port, queueType, value); } return E_OK; } uint32_t FM_PORT_GetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) { REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for non-Rx ports")); return 0; } return fman_port_get_bpool_counter(&p_FmPort->port, poolId); } t_Error FM_PORT_ModifyAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, uint32_t value) { t_FmPort *p_FmPort = (t_FmPort *)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) RETURN_ERROR( MINOR, E_INVALID_STATE, ("Requested counter is not available for non-Rx ports")); fman_port_set_bpool_counter(&p_FmPort->port, poolId, value); return E_OK; } bool FM_PORT_IsStalled(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err; bool isStalled; SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, FALSE); SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, FALSE); err = FmIsPortStalled(p_FmPort->h_Fm, p_FmPort->hardwarePortId, &isStalled); if (err != E_OK) { 
REPORT_ERROR(MAJOR, err, NO_MSG); return TRUE; } return isStalled; } t_Error FM_PORT_ReleaseStalled(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); return FmResumeStalledPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId); } t_Error FM_PORT_SetRxL4ChecksumVerify(t_Handle h_FmPort, bool l4Checksum) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); if (l4Checksum) err = fman_port_modify_rx_fd_bits( &p_FmPort->port, (uint8_t)(BMI_PORT_RFNE_FRWD_DCL4C >> 24), TRUE); else err = fman_port_modify_rx_fd_bits( &p_FmPort->port, (uint8_t)(BMI_PORT_RFNE_FRWD_DCL4C >> 24), FALSE); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_modify_rx_fd_bits")); return E_OK; } /*****************************************************************************/ /* API Run-time PCD Control unit functions */ /*****************************************************************************/ #if (DPAA_VERSION >= 11) t_Error FM_PORT_VSPAlloc(t_Handle h_FmPort, t_FmPortVSPAllocParams *p_VSPParams) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err = E_OK; volatile uint32_t *p_BmiStorageProfileId = NULL, *p_BmiVspe = NULL; uint32_t tmpReg = 0, tmp = 0; uint16_t hwStoragePrflId; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPort->h_Fm, E_INVALID_HANDLE); /*for numOfProfiles = 0 don't call this function*/ SANITY_CHECK_RETURN_ERROR(p_VSPParams->numOfProfiles, E_INVALID_VALUE); /*dfltRelativeId should be in the range of numOfProfiles*/ SANITY_CHECK_RETURN_ERROR( p_VSPParams->dfltRelativeId < p_VSPParams->numOfProfiles, E_INVALID_VALUE); /*p_FmPort should be from Rx type or OP*/ SANITY_CHECK_RETURN_ERROR( ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)), E_INVALID_VALUE); /*port should be disabled*/ SANITY_CHECK_RETURN_ERROR(!p_FmPort->enabled, E_INVALID_STATE); /*if its called for Rx port relevant Tx Port should be passed (initialized) too and it should be disabled*/ SANITY_CHECK_RETURN_ERROR( ((p_VSPParams->h_FmTxPort && !((t_FmPort *)(p_VSPParams->h_FmTxPort))->enabled) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)), E_INVALID_VALUE); /*should be called before SetPCD - this port should be without PCD*/ SANITY_CHECK_RETURN_ERROR(!p_FmPort->pcdEngines, E_INVALID_STATE); /*alloc window of VSPs for this port*/ err = FmVSPAllocForPort(p_FmPort->h_Fm, p_FmPort->portType, p_FmPort->portId, p_VSPParams->numOfProfiles); if (err != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); /*get absolute VSP ID for dfltRelative*/ err = FmVSPGetAbsoluteProfileId(p_FmPort->h_Fm, p_FmPort->portType, p_FmPort->portId, p_VSPParams->dfltRelativeId, &hwStoragePrflId); if (err != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); /*fill relevant registers for p_FmPort and relative TxPort in the case p_FmPort from Rx type*/ switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_BmiStorageProfileId = &(((t_FmPort *)(p_VSPParams->h_FmTxPort))->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfqid); p_BmiVspe = &(((t_FmPort 
*)(p_VSPParams->h_FmTxPort))->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfne); tmpReg = GET_UINT32(*p_BmiStorageProfileId) & ~BMI_SP_ID_MASK; tmpReg |= (uint32_t)hwStoragePrflId << BMI_SP_ID_SHIFT; WRITE_UINT32(*p_BmiStorageProfileId, tmpReg); tmpReg = GET_UINT32(*p_BmiVspe); WRITE_UINT32(*p_BmiVspe, tmpReg | BMI_SP_EN); p_BmiStorageProfileId = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfqid; p_BmiVspe = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rpp; hwStoragePrflId = p_VSPParams->dfltRelativeId; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): tmpReg = NIA_ENG_BMI | NIA_BMI_AC_FETCH_ALL_FRAME; WRITE_UINT32( p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn, tmpReg); p_BmiStorageProfileId = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofqid; p_BmiVspe = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_opp; tmp |= BMI_EBD_EN; break; default: RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); } p_FmPort->vspe = TRUE; p_FmPort->dfltRelativeId = p_VSPParams->dfltRelativeId; tmpReg = GET_UINT32(*p_BmiStorageProfileId) & ~BMI_SP_ID_MASK; tmpReg |= (uint32_t)hwStoragePrflId << BMI_SP_ID_SHIFT; WRITE_UINT32(*p_BmiStorageProfileId, tmpReg); tmpReg = GET_UINT32(*p_BmiVspe); WRITE_UINT32(*p_BmiVspe, tmpReg | BMI_SP_EN | tmp); return E_OK; } #endif /* (DPAA_VERSION >= 11) */ t_Error FM_PORT_PcdPlcrAllocProfiles(t_Handle h_FmPort, uint16_t numOfProfiles) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err = E_OK; p_FmPort->h_FmPcd = FmGetPcdHandle(p_FmPort->h_Fm); ASSERT_COND(p_FmPort->h_FmPcd); if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } if (numOfProfiles) { err = FmPcdPlcrAllocProfiles(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, numOfProfiles); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } /* set the port handle within the PCD policer, even if no profiles defined */ FmPcdPortRegister(p_FmPort->h_FmPcd, h_FmPort, p_FmPort->hardwarePortId); RELEASE_LOCK(p_FmPort->lock); return E_OK; } t_Error FM_PORT_PcdPlcrFreeProfiles(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err = E_OK; if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } err = FmPcdPlcrFreeProfiles(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId); RELEASE_LOCK(p_FmPort->lock); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); return E_OK; } t_Error FM_PORT_PcdKgModifyInitialScheme(t_Handle h_FmPort, t_FmPcdKgSchemeSelect *p_FmPcdKgScheme) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; volatile uint32_t *p_BmiHpnia = NULL; uint32_t tmpReg; uint8_t relativeSchemeId; uint8_t physicalSchemeId; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG, E_INVALID_STATE); tmpReg = (uint32_t)((p_FmPort->pcdEngines & FM_PCD_CC) ? 
NIA_KG_CC_EN : 0); switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_BmiHpnia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_BmiHpnia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne; break; default: RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); } if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } /* if we want to change to direct scheme, we need to check that this scheme is valid */ if (p_FmPcdKgScheme->direct) { physicalSchemeId = FmPcdKgGetSchemeId(p_FmPcdKgScheme->h_DirectScheme); /* check that this scheme is bound to this port */ if (!(p_FmPort->schemesPerPortVector & (uint32_t)(1 << (31 - (uint32_t)physicalSchemeId)))) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR( MAJOR, E_INVALID_STATE, ("called with a scheme that is not bound to this port")); } relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPort->h_FmPcd, physicalSchemeId); if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("called with invalid Scheme ")); } if (!FmPcdKgIsSchemeValidSw(p_FmPcdKgScheme->h_DirectScheme)) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, E_INVALID_STATE, ("called with uninitialized Scheme ")); } WRITE_UINT32( *p_BmiHpnia, NIA_ENG_KG | tmpReg | NIA_KG_DIRECT | (uint32_t)physicalSchemeId); } else /* change to indirect scheme */ WRITE_UINT32(*p_BmiHpnia, NIA_ENG_KG | tmpReg); RELEASE_LOCK(p_FmPort->lock); return E_OK; } t_Error FM_PORT_PcdPlcrModifyInitialProfile(t_Handle h_FmPort, t_Handle h_Profile) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; volatile uint32_t *p_BmiNia; volatile uint32_t *p_BmiHpnia; uint32_t tmpReg; uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile); SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_PLCR, E_INVALID_STATE); /* check relevance of this routine - only when policer is used directly after BMI or Parser */ if ((p_FmPort->pcdEngines & FM_PCD_KG) || (p_FmPort->pcdEngines & FM_PCD_CC)) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("relevant only when PCD support mode is e_FM_PCD_SUPPORT_PLCR_ONLY or e_FM_PCD_SUPPORT_PRS_AND_PLCR")); switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne; p_BmiHpnia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne; tmpReg = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne; p_BmiHpnia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne; tmpReg = 0; break; default: RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); } if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } if (!FmPcdPlcrIsProfileValid(p_FmPort->h_FmPcd, absoluteProfileId)) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Invalid profile")); } tmpReg |= (uint32_t)(NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE | absoluteProfileId); if (p_FmPort->pcdEngines & FM_PCD_PRS) /* e_FM_PCD_SUPPORT_PRS_AND_PLCR */ { /* update BMI HPNIA */ WRITE_UINT32(*p_BmiHpnia, tmpReg); } else /* e_FM_PCD_SUPPORT_PLCR_ONLY */ { /* rfne may contain FDCS bits, so first we read them. 
*/ tmpReg |= (GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK); /* update BMI NIA */ WRITE_UINT32(*p_BmiNia, tmpReg); }RELEASE_LOCK(p_FmPort->lock); return E_OK; } t_Error FM_PORT_PcdCcModifyTree(t_Handle h_FmPort, t_Handle h_CcTree) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err = E_OK; volatile uint32_t *p_BmiCcBase = NULL; volatile uint32_t *p_BmiNia = NULL; uint32_t ccTreePhysOffset; SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(h_CcTree, E_INVALID_HANDLE); if (p_FmPort->imEn) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independent mode ports only")); /* get PCD registers pointers */ switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne; break; default: RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); } /* check that current NIA is BMI to BMI */ if ((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK) != GET_NIA_BMI_AC_ENQ_FRAME(p_FmPort->h_FmPcd)) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("may be called only for ports in BMI-to-BMI state.")); if (p_FmPort->pcdEngines & FM_PCD_CC) { if (p_FmPort->h_IpReassemblyManip) { err = FmPcdCcTreeAddIPR(p_FmPort->h_FmPcd, h_CcTree, NULL, p_FmPort->h_IpReassemblyManip, FALSE); if (err != E_OK) { RETURN_ERROR(MAJOR, err, NO_MSG); } } else if (p_FmPort->h_CapwapReassemblyManip) { err = FmPcdCcTreeAddCPR(p_FmPort->h_FmPcd, h_CcTree, NULL, p_FmPort->h_CapwapReassemblyManip, FALSE); if (err != E_OK) { RETURN_ERROR(MAJOR, err, NO_MSG); } } switch (p_FmPort->portType) { case (e_FM_PORT_TYPE_RX_10G): case (e_FM_PORT_TYPE_RX): p_BmiCcBase = &p_FmPort->port.bmi_regs->rx.fmbm_rccb; break; case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): p_BmiCcBase = &p_FmPort->port.bmi_regs->oh.fmbm_occb; break; default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); } if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } err = FmPcdCcBindTree(p_FmPort->h_FmPcd, NULL, h_CcTree, &ccTreePhysOffset, h_FmPort); if (err) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); }WRITE_UINT32(*p_BmiCcBase, ccTreePhysOffset); p_FmPort->ccTreeId = h_CcTree; RELEASE_LOCK(p_FmPort->lock); } else RETURN_ERROR( MAJOR, E_INVALID_STATE, ("Coarse Classification not defined for this port.")); return E_OK; } t_Error FM_PORT_AttachPCD(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if (p_FmPort->imEn) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independent mode ports only")); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } if (p_FmPort->h_ReassemblyTree) p_FmPort->pcdEngines |= FM_PCD_CC; err = AttachPCD(h_FmPort); RELEASE_LOCK(p_FmPort->lock); return err; } t_Error FM_PORT_DetachPCD(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); 
SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if (p_FmPort->imEn) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independent mode ports only")); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } err = DetachPCD(h_FmPort); if (err != E_OK) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } if (p_FmPort->h_ReassemblyTree) p_FmPort->pcdEngines &= ~FM_PCD_CC; RELEASE_LOCK(p_FmPort->lock); return E_OK; } t_Error FM_PORT_SetPCD(t_Handle h_FmPort, t_FmPortPcdParams *p_PcdParam) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err = E_OK; t_FmPortPcdParams modifiedPcdParams, *p_PcdParams; t_FmPcdCcTreeParams *p_FmPcdCcTreeParams; t_FmPortPcdCcParams fmPortPcdCcParams; t_FmPortGetSetCcParams fmPortGetSetCcParams; SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_PcdParam, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if (p_FmPort->imEn) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independent mode ports only")); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } p_FmPort->h_FmPcd = FmGetPcdHandle(p_FmPort->h_Fm); ASSERT_COND(p_FmPort->h_FmPcd); if (p_PcdParam->p_CcParams && !p_PcdParam->p_CcParams->h_CcTree) RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("Tree handle must be given if CC is required")); memcpy(&modifiedPcdParams, p_PcdParam, sizeof(t_FmPortPcdParams)); p_PcdParams = &modifiedPcdParams; if ((p_PcdParams->h_IpReassemblyManip) #if (DPAA_VERSION >= 11) || (p_PcdParams->h_CapwapReassemblyManip) #endif /* (DPAA_VERSION >= 11) */ ) { if ((p_PcdParams->pcdSupport != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG) && (p_PcdParams->pcdSupport != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC) && (p_PcdParams->pcdSupport != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR) && (p_PcdParams->pcdSupport != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR)) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR( MAJOR, E_INVALID_STATE, ("pcdSupport must have KG for supporting Reassembly")); } p_FmPort->h_IpReassemblyManip = p_PcdParams->h_IpReassemblyManip; #if (DPAA_VERSION >= 11) if ((p_PcdParams->h_IpReassemblyManip) && (p_PcdParams->h_CapwapReassemblyManip)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Either IP-R or CAPWAP-R is allowed")); if ((p_PcdParams->h_CapwapReassemblyManip) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("CAPWAP-R is allowed only on offline-port")); if (p_PcdParams->h_CapwapReassemblyManip) p_FmPort->h_CapwapReassemblyManip = p_PcdParams->h_CapwapReassemblyManip; #endif /* (DPAA_VERSION >= 11) */ if (!p_PcdParams->p_CcParams) { if (!((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG) || (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR))) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR( MAJOR, E_INVALID_STATE, ("PCD initialization 
structure is not consistent with pcdSupport")); } /* No user-tree, need to build internal tree */ p_FmPcdCcTreeParams = (t_FmPcdCcTreeParams*)XX_Malloc( sizeof(t_FmPcdCcTreeParams)); if (!p_FmPcdCcTreeParams) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_FmPcdCcTreeParams")); memset(p_FmPcdCcTreeParams, 0, sizeof(t_FmPcdCcTreeParams)); p_FmPcdCcTreeParams->h_NetEnv = p_PcdParams->h_NetEnv; p_FmPort->h_ReassemblyTree = FM_PCD_CcRootBuild( p_FmPort->h_FmPcd, p_FmPcdCcTreeParams); if (!p_FmPort->h_ReassemblyTree) { RELEASE_LOCK(p_FmPort->lock); XX_Free(p_FmPcdCcTreeParams); RETURN_ERROR( MAJOR, E_INVALID_HANDLE, ("FM_PCD_CcBuildTree for Reassembly failed")); } if (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG) p_PcdParams->pcdSupport = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC; else p_PcdParams->pcdSupport = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR; memset(&fmPortPcdCcParams, 0, sizeof(t_FmPortPcdCcParams)); fmPortPcdCcParams.h_CcTree = p_FmPort->h_ReassemblyTree; p_PcdParams->p_CcParams = &fmPortPcdCcParams; XX_Free(p_FmPcdCcTreeParams); } if (p_FmPort->h_IpReassemblyManip) err = FmPcdCcTreeAddIPR(p_FmPort->h_FmPcd, p_PcdParams->p_CcParams->h_CcTree, p_PcdParams->h_NetEnv, p_FmPort->h_IpReassemblyManip, TRUE); #if (DPAA_VERSION >= 11) else if (p_FmPort->h_CapwapReassemblyManip) err = FmPcdCcTreeAddCPR(p_FmPort->h_FmPcd, p_PcdParams->p_CcParams->h_CcTree, p_PcdParams->h_NetEnv, p_FmPort->h_CapwapReassemblyManip, TRUE); #endif /* (DPAA_VERSION >= 11) */ if (err != E_OK) { if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; }RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } } if (!FmPcdLockTryLockAll(p_FmPort->h_FmPcd)) { if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; }RELEASE_LOCK(p_FmPort->lock); DBG(TRACE, ("Try LockAll - BUSY")); return ERROR_CODE(E_BUSY); } err = SetPcd(h_FmPort, p_PcdParams); if (err) { if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; } FmPcdLockUnlockAll(p_FmPort->h_FmPcd); RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } if ((p_FmPort->pcdEngines & FM_PCD_PRS) && (p_PcdParams->p_PrsParams->includeInPrsStatistics)) { err = FmPcdPrsIncludePortInStatistics(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, TRUE); if (err) { DeletePcd(p_FmPort); if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; } FmPcdLockUnlockAll(p_FmPort->h_FmPcd); RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } p_FmPort->includeInPrsStatistics = TRUE; } FmPcdIncNetEnvOwners(p_FmPort->h_FmPcd, p_FmPort->netEnvId); if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)) { memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams)); if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) { #ifdef FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004 if ((p_FmPort->fmRevInfo.majorRev < 6) && (p_FmPort->pcdEngines & FM_PCD_KG)) { int i; for (i = 0; ip_KgParams->numOfSchemes; i++) /* The following function must be locked */ FmPcdKgCcGetSetParams(p_FmPort->h_FmPcd, p_PcdParams->p_KgParams->h_Schemes[i], UPDATE_KG_NIA_CC_WA, 0); } #endif /* FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004 */ #if (DPAA_VERSION >= 11) { t_FmPcdCtrlParamsPage *p_ParamsPage; FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE, (void**)&p_ParamsPage); ASSERT_COND(p_ParamsPage); WRITE_UINT32(p_ParamsPage->postBmiFetchNia, 
p_FmPort->savedBmiNia); } #endif /* (DPAA_VERSION >= 11) */ /* Set post-bmi-fetch nia */ p_FmPort->savedBmiNia &= BMI_RFNE_FDCS_MASK; p_FmPort->savedBmiNia |= (NIA_FM_CTL_AC_POST_BMI_FETCH | NIA_ENG_FM_CTL); /* Set pre-bmi-fetch nia */ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN; #if (DPAA_VERSION >= 11) fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_PRE_BMI_FETCH_FULL_FRAME | NIA_ENG_FM_CTL); #else fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_PRE_BMI_FETCH_HEADER | NIA_ENG_FM_CTL); #endif /* (DPAA_VERSION >= 11) */ if ((err = FmPortGetSetCcParams(p_FmPort, &fmPortGetSetCcParams)) != E_OK) { DeletePcd(p_FmPort); if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; } FmPcdLockUnlockAll(p_FmPort->h_FmPcd); RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } } FmPcdLockUnlockAll(p_FmPort->h_FmPcd); /* Set pop-to-next-step nia */ #if (DPAA_VERSION == 10) if (p_FmPort->fmRevInfo.majorRev < 6) { fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN; fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL; } else { #endif /* (DPAA_VERSION == 10) */ fmPortGetSetCcParams.getCcParams.type = GET_NIA_FPNE; #if (DPAA_VERSION == 10) } #endif /* (DPAA_VERSION == 10) */ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK) { DeletePcd(p_FmPort); if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; }RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } /* Set post-bmi-prepare-to-enq nia */ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FENE; fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_POST_BMI_ENQ | NIA_ENG_FM_CTL); if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK) { DeletePcd(p_FmPort); if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; }RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } if ((p_FmPort->h_IpReassemblyManip) || (p_FmPort->h_CapwapReassemblyManip)) { #if (DPAA_VERSION == 10) if (p_FmPort->fmRevInfo.majorRev < 6) { /* Overwrite post-bmi-prepare-to-enq nia */ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FENE; fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_POST_BMI_ENQ_ORR | NIA_ENG_FM_CTL | NIA_ORDER_RESTOR); fmPortGetSetCcParams.setCcParams.overwrite = TRUE; } else { #endif /* (DPAA_VERSION == 10) */ /* Set the ORR bit (for order-restoration) */ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FPNE; fmPortGetSetCcParams.setCcParams.nia = fmPortGetSetCcParams.getCcParams.nia | NIA_ORDER_RESTOR; #if (DPAA_VERSION == 10) } #endif /* (DPAA_VERSION == 10) */ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK) { DeletePcd(p_FmPort); if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; }RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } } } else FmPcdLockUnlockAll(p_FmPort->h_FmPcd); #if (DPAA_VERSION >= 11) { t_FmPcdCtrlParamsPage *p_ParamsPage; memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams)); fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_CMNE; if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)) fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL; else fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_NO_IPACC_POP_TO_N_STEP | NIA_ENG_FM_CTL; if ((err = 
FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK) { DeletePcd(p_FmPort); if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; }RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE, (void**)&p_ParamsPage); ASSERT_COND(p_ParamsPage); if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)) WRITE_UINT32( p_ParamsPage->misc, GET_UINT32(p_ParamsPage->misc) | FM_CTL_PARAMS_PAGE_OFFLOAD_SUPPORT_EN); if ((p_FmPort->h_IpReassemblyManip) || (p_FmPort->h_CapwapReassemblyManip)) { if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) WRITE_UINT32( p_ParamsPage->discardMask, GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm)); else WRITE_UINT32( p_ParamsPage->discardMask, GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm)); } #ifdef FM_ERROR_VSP_NO_MATCH_SW006 if (p_FmPort->vspe) WRITE_UINT32( p_ParamsPage->misc, GET_UINT32(p_ParamsPage->misc) | (p_FmPort->dfltRelativeId & FM_CTL_PARAMS_PAGE_ERROR_VSP_MASK)); #endif /* FM_ERROR_VSP_NO_MATCH_SW006 */ } #endif /* (DPAA_VERSION >= 11) */ err = AttachPCD(h_FmPort); if (err) { DeletePcd(p_FmPort); if (p_FmPort->h_ReassemblyTree) { FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); p_FmPort->h_ReassemblyTree = NULL; }RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } RELEASE_LOCK(p_FmPort->lock); return err; } t_Error FM_PORT_DeletePCD(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); if (p_FmPort->imEn) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independant mode ports only")); if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR( MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } err = DetachPCD(h_FmPort); if (err) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } FmPcdDecNetEnvOwners(p_FmPort->h_FmPcd, p_FmPort->netEnvId); /* we do it anyway, instead of checking if included */ if ((p_FmPort->pcdEngines & FM_PCD_PRS) && p_FmPort->includeInPrsStatistics) { FmPcdPrsIncludePortInStatistics(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, FALSE); p_FmPort->includeInPrsStatistics = FALSE; } if (!FmPcdLockTryLockAll(p_FmPort->h_FmPcd)) { RELEASE_LOCK(p_FmPort->lock); DBG(TRACE, ("Try LockAll - BUSY")); return ERROR_CODE(E_BUSY); } err = DeletePcd(h_FmPort); FmPcdLockUnlockAll(p_FmPort->h_FmPcd); if (err) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } if (p_FmPort->h_ReassemblyTree) { err = FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree); if (err) { RELEASE_LOCK(p_FmPort->lock); RETURN_ERROR(MAJOR, err, NO_MSG); } p_FmPort->h_ReassemblyTree = NULL; }RELEASE_LOCK(p_FmPort->lock); return err; } t_Error FM_PORT_PcdKgBindSchemes(t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_FmPcdKgInterModuleBindPortToSchemes schemeBind; t_Error err = E_OK; uint32_t tmpScmVec = 0; int i; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); 
SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG, E_INVALID_STATE); schemeBind.netEnvId = p_FmPort->netEnvId; schemeBind.hardwarePortId = p_FmPort->hardwarePortId; schemeBind.numOfSchemes = p_PortScheme->numOfSchemes; schemeBind.useClsPlan = p_FmPort->useClsPlan; for (i = 0; i < schemeBind.numOfSchemes; i++) { schemeBind.schemesIds[i] = FmPcdKgGetSchemeId( p_PortScheme->h_Schemes[i]); /* build vector */ tmpScmVec |= 1 << (31 - (uint32_t)schemeBind.schemesIds[i]); } if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } err = FmPcdKgBindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind); if (err == E_OK) p_FmPort->schemesPerPortVector |= tmpScmVec; #ifdef FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004 if ((FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)) && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) && (p_FmPort->fmRevInfo.majorRev < 6)) { for (i=0; inumOfSchemes; i++) FmPcdKgCcGetSetParams(p_FmPort->h_FmPcd, p_PortScheme->h_Schemes[i], UPDATE_KG_NIA_CC_WA, 0); } #endif /* FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004 */ RELEASE_LOCK(p_FmPort->lock); return err; } t_Error FM_PORT_PcdKgUnbindSchemes(t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; t_FmPcdKgInterModuleBindPortToSchemes schemeBind; t_Error err = E_OK; uint32_t tmpScmVec = 0; int i; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG, E_INVALID_STATE); schemeBind.netEnvId = p_FmPort->netEnvId; schemeBind.hardwarePortId = p_FmPort->hardwarePortId; schemeBind.numOfSchemes = p_PortScheme->numOfSchemes; for (i = 0; i < schemeBind.numOfSchemes; i++) { schemeBind.schemesIds[i] = FmPcdKgGetSchemeId( p_PortScheme->h_Schemes[i]); /* build vector */ tmpScmVec |= 1 << (31 - (uint32_t)schemeBind.schemesIds[i]); } if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) { DBG(TRACE, ("FM Port Try Lock - BUSY")); return ERROR_CODE(E_BUSY); } err = FmPcdKgUnbindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind); if (err == E_OK) p_FmPort->schemesPerPortVector &= ~tmpScmVec; RELEASE_LOCK(p_FmPort->lock); return err; } t_Error FM_PORT_AddCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; uint8_t priorityTmpArray[FM_PORT_NUM_OF_CONGESTION_GRPS]; uint8_t mod, index; uint32_t i, grpsMap[FMAN_PORT_CG_MAP_NUM]; int err; #if (DPAA_VERSION >= 11) int j; #endif /* (DPAA_VERSION >= 11) */ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); /* un-necessary check of the indexes; probably will be needed in the future when there will be more CGs available .... 
for (i=0; inumOfCongestionGrpsToConsider; i++) if (p_CongestionGrps->congestionGrpsToConsider[i] >= FM_PORT_NUM_OF_CONGESTION_GRPS) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("CG id!")); */ #ifdef FM_NO_OP_OBSERVED_CGS if ((p_FmPort->fmRevInfo.majorRev != 4) && (p_FmPort->fmRevInfo.majorRev < 6)) { if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx ports only")); } else #endif /* FM_NO_OP_OBSERVED_CGS */ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx & OP ports only")); /* Prepare groups map array */ memset(grpsMap, 0, FMAN_PORT_CG_MAP_NUM * sizeof(uint32_t)); for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++) { index = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] / 32); mod = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] % 32); if (p_FmPort->fmRevInfo.majorRev != 4) grpsMap[7 - index] |= (uint32_t)(1 << mod); else grpsMap[0] |= (uint32_t)(1 << mod); } memset(&priorityTmpArray, 0, FM_PORT_NUM_OF_CONGESTION_GRPS * sizeof(uint8_t)); for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++) { #if (DPAA_VERSION >= 11) for (j = 0; j < FM_MAX_NUM_OF_PFC_PRIORITIES; j++) if (p_CongestionGrps->pfcPrioritiesEn[i][j]) priorityTmpArray[p_CongestionGrps->congestionGrpsToConsider[i]] |= (0x01 << (FM_MAX_NUM_OF_PFC_PRIORITIES - j - 1)); #endif /* (DPAA_VERSION >= 11) */ } #if (DPAA_VERSION >= 11) for (i = 0; i < FM_PORT_NUM_OF_CONGESTION_GRPS; i++) { err = FmSetCongestionGroupPFCpriority(p_FmPort->h_Fm, i, priorityTmpArray[i]); if (err) return err; } #endif /* (DPAA_VERSION >= 11) */ err = fman_port_add_congestion_grps(&p_FmPort->port, grpsMap); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_add_congestion_grps")); return E_OK; } t_Error FM_PORT_RemoveCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; uint8_t mod, index; uint32_t i, grpsMap[FMAN_PORT_CG_MAP_NUM]; int err; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); { #ifdef FM_NO_OP_OBSERVED_CGS t_FmRevisionInfo revInfo; FM_GetRevision(p_FmPort->h_Fm, &revInfo); if (revInfo.majorRev != 4) { if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx ports only")); } else #endif /* FM_NO_OP_OBSERVED_CGS */ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx & OP ports only")); } /* Prepare groups map array */ memset(grpsMap, 0, FMAN_PORT_CG_MAP_NUM * sizeof(uint32_t)); for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++) { index = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] / 32); mod = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] % 32); if (p_FmPort->fmRevInfo.majorRev != 4) grpsMap[7 - index] |= (uint32_t)(1 << mod); else grpsMap[0] |= (uint32_t)(1 << mod); } #if (DPAA_VERSION >= 11) for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++) { t_Error err = FmSetCongestionGroupPFCpriority( p_FmPort->h_Fm, p_CongestionGrps->congestionGrpsToConsider[i], 0); if (err) return err; } #endif /* (DPAA_VERSION >= 11) */ err = 
fman_port_remove_congestion_grps(&p_FmPort->port, grpsMap); if (err != 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_remove_congestion_grps")); return E_OK; } #if (DPAA_VERSION >= 11) t_Error FM_PORT_GetIPv4OptionsCount(t_Handle h_FmPort, uint32_t *p_Ipv4OptionsCount) { t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR( (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING), E_INVALID_VALUE); SANITY_CHECK_RETURN_ERROR(p_FmPort->p_ParamsPage, E_INVALID_STATE); SANITY_CHECK_RETURN_ERROR(p_Ipv4OptionsCount, E_NULL_POINTER); *p_Ipv4OptionsCount = GET_UINT32(p_FmPort->p_ParamsPage->ipfOptionsCounter); return E_OK; } #endif /* (DPAA_VERSION >= 11) */ t_Error FM_PORT_ConfigDsarSupport(t_Handle h_FmPortRx, t_FmPortDsarTablesSizes *params) { t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx; p_FmPort->deepSleepVars.autoResMaxSizes = XX_Malloc( sizeof(struct t_FmPortDsarTablesSizes)); memcpy(p_FmPort->deepSleepVars.autoResMaxSizes, params, sizeof(struct t_FmPortDsarTablesSizes)); return E_OK; } static t_Error FmPortConfigAutoResForDeepSleepSupport1(t_FmPort *p_FmPort) { uint32_t *param_page; t_FmPortDsarTablesSizes *params = p_FmPort->deepSleepVars.autoResMaxSizes; t_ArCommonDesc *ArCommonDescPtr; uint32_t size = sizeof(t_ArCommonDesc); // ARP // should put here if (params->max_num_of_arp_entries)? size = ROUND_UP(size,4); size += sizeof(t_DsarArpDescriptor); size += sizeof(t_DsarArpBindingEntry) * params->maxNumOfArpEntries; size += sizeof(t_DsarArpStatistics); //ICMPV4 size = ROUND_UP(size,4); size += sizeof(t_DsarIcmpV4Descriptor); size += sizeof(t_DsarIcmpV4BindingEntry) * params->maxNumOfEchoIpv4Entries; size += sizeof(t_DsarIcmpV4Statistics); //ICMPV6 size = ROUND_UP(size,4); size += sizeof(t_DsarIcmpV6Descriptor); size += sizeof(t_DsarIcmpV6BindingEntry) * params->maxNumOfEchoIpv6Entries; size += sizeof(t_DsarIcmpV6Statistics); //ND size = ROUND_UP(size,4); size += sizeof(t_DsarNdDescriptor); size += sizeof(t_DsarIcmpV6BindingEntry) * params->maxNumOfNdpEntries; size += sizeof(t_DsarIcmpV6Statistics); //SNMP size = ROUND_UP(size,4); size += sizeof(t_DsarSnmpDescriptor); size += sizeof(t_DsarSnmpIpv4AddrTblEntry) * params->maxNumOfSnmpIPV4Entries; size += sizeof(t_DsarSnmpIpv6AddrTblEntry) * params->maxNumOfSnmpIPV6Entries; size += sizeof(t_OidsTblEntry) * params->maxNumOfSnmpOidEntries; size += params->maxNumOfSnmpOidChar; size += sizeof(t_DsarIcmpV6Statistics); //filters size = ROUND_UP(size,4); size += params->maxNumOfIpProtFiltering; size = ROUND_UP(size,4); size += params->maxNumOfUdpPortFiltering * sizeof(t_PortTblEntry); size = ROUND_UP(size,4); size += params->maxNumOfTcpPortFiltering * sizeof(t_PortTblEntry); // add here for more protocols // statistics size = ROUND_UP(size,4); size += sizeof(t_ArStatistics); ArCommonDescPtr = FM_MURAM_AllocMem(p_FmPort->h_FmMuram, size, 0x10); param_page = XX_PhysToVirt( p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr)); WRITE_UINT32( *param_page, (uint32_t)(XX_VirtToPhys(ArCommonDescPtr) - p_FmPort->fmMuramPhysBaseAddr)); return E_OK; } t_FmPortDsarTablesSizes* FM_PORT_GetDsarTablesMaxSizes(t_Handle h_FmPortRx) { t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx; return p_FmPort->deepSleepVars.autoResMaxSizes; } struct arOffsets { uint32_t arp; uint32_t nd; uint32_t icmpv4; uint32_t icmpv6; uint32_t snmp; uint32_t stats; uint32_t filtIp; uint32_t filtUdp; uint32_t filtTcp; }; static uint32_t AR_ComputeOffsets(struct arOffsets* of, 
struct t_FmPortDsarParams *params, t_FmPort *p_FmPort) { uint32_t size = sizeof(t_ArCommonDesc); // ARP if (params->p_AutoResArpInfo) { size = ROUND_UP(size,4); of->arp = size; size += sizeof(t_DsarArpDescriptor); size += sizeof(t_DsarArpBindingEntry) * params->p_AutoResArpInfo->tableSize; size += sizeof(t_DsarArpStatistics); } // ICMPV4 if (params->p_AutoResEchoIpv4Info) { size = ROUND_UP(size,4); of->icmpv4 = size; size += sizeof(t_DsarIcmpV4Descriptor); size += sizeof(t_DsarIcmpV4BindingEntry) * params->p_AutoResEchoIpv4Info->tableSize; size += sizeof(t_DsarIcmpV4Statistics); } // ICMPV6 if (params->p_AutoResEchoIpv6Info) { size = ROUND_UP(size,4); of->icmpv6 = size; size += sizeof(t_DsarIcmpV6Descriptor); size += sizeof(t_DsarIcmpV6BindingEntry) * params->p_AutoResEchoIpv6Info->tableSize; size += sizeof(t_DsarIcmpV6Statistics); } // ND if (params->p_AutoResNdpInfo) { size = ROUND_UP(size,4); of->nd = size; size += sizeof(t_DsarNdDescriptor); size += sizeof(t_DsarIcmpV6BindingEntry) * (params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp); size += sizeof(t_DsarIcmpV6Statistics); } // SNMP if (params->p_AutoResSnmpInfo) { size = ROUND_UP(size,4); of->snmp = size; size += sizeof(t_DsarSnmpDescriptor); size += sizeof(t_DsarSnmpIpv4AddrTblEntry) * params->p_AutoResSnmpInfo->numOfIpv4Addresses; size += sizeof(t_DsarSnmpIpv6AddrTblEntry) * params->p_AutoResSnmpInfo->numOfIpv6Addresses; size += sizeof(t_OidsTblEntry) * params->p_AutoResSnmpInfo->oidsTblSize; size += p_FmPort->deepSleepVars.autoResMaxSizes->maxNumOfSnmpOidChar; size += sizeof(t_DsarIcmpV6Statistics); } //filters size = ROUND_UP(size,4); if (params->p_AutoResFilteringInfo) { of->filtIp = size; size += params->p_AutoResFilteringInfo->ipProtTableSize; size = ROUND_UP(size,4); of->filtUdp = size; size += params->p_AutoResFilteringInfo->udpPortsTableSize * sizeof(t_PortTblEntry); size = ROUND_UP(size,4); of->filtTcp = size; size += params->p_AutoResFilteringInfo->tcpPortsTableSize * sizeof(t_PortTblEntry); } // add here for more protocols // statistics size = ROUND_UP(size,4); of->stats = size; size += sizeof(t_ArStatistics); return size; } uint32_t* ARDesc; void PrsEnable(t_Handle p_FmPcd); void PrsDisable(t_Handle p_FmPcd); int PrsIsEnabled(t_Handle p_FmPcd); t_Handle FM_PCD_GetHcPort(t_Handle h_FmPcd); static t_Error DsarCheckParams(t_FmPortDsarParams *params, t_FmPortDsarTablesSizes *sizes) { bool macInit = FALSE; uint8_t mac[6]; int i = 0; // check table sizes if (params->p_AutoResArpInfo && sizes->maxNumOfArpEntries < params->p_AutoResArpInfo->tableSize) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("DSAR: Arp table size exceeds the configured maximum size.")); if (params->p_AutoResEchoIpv4Info && sizes->maxNumOfEchoIpv4Entries < params->p_AutoResEchoIpv4Info->tableSize) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("DSAR: EchoIpv4 table size exceeds the configured maximum size.")); if (params->p_AutoResNdpInfo && sizes->maxNumOfNdpEntries < params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("DSAR: NDP table size exceeds the configured maximum size.")); if (params->p_AutoResEchoIpv6Info && sizes->maxNumOfEchoIpv6Entries < params->p_AutoResEchoIpv6Info->tableSize) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("DSAR: EchoIpv6 table size exceeds the configured maximum size.")); if (params->p_AutoResSnmpInfo && sizes->maxNumOfSnmpOidEntries < params->p_AutoResSnmpInfo->oidsTblSize) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("DSAR: Snmp Oid table size 
exceeds the configured maximum size."));
    if (params->p_AutoResSnmpInfo && sizes->maxNumOfSnmpIPV4Entries < params->p_AutoResSnmpInfo->numOfIpv4Addresses)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: Snmp ipv4 table size exceeds the configured maximum size."));
    if (params->p_AutoResSnmpInfo && sizes->maxNumOfSnmpIPV6Entries < params->p_AutoResSnmpInfo->numOfIpv6Addresses)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: Snmp ipv6 table size exceeds the configured maximum size."));
    if (params->p_AutoResFilteringInfo)
    {
        if (sizes->maxNumOfIpProtFiltering < params->p_AutoResFilteringInfo->ipProtTableSize)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: ip filter table size exceeds the configured maximum size."));
        /* compare each table against its own maximum; the udp/tcp limits were transposed here */
        if (sizes->maxNumOfUdpPortFiltering < params->p_AutoResFilteringInfo->udpPortsTableSize)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: udp filter table size exceeds the configured maximum size."));
        if (sizes->maxNumOfTcpPortFiltering < params->p_AutoResFilteringInfo->tcpPortsTableSize)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: tcp filter table size exceeds the configured maximum size."));
    }
    /* check that only 1 MAC address is configured (this is what the ucode currently supports) */
    if (params->p_AutoResArpInfo && params->p_AutoResArpInfo->tableSize)
    {
        memcpy(mac, params->p_AutoResArpInfo->p_AutoResTable[0].mac, 6);
        i = 1;
        macInit = TRUE;
        for (; i < params->p_AutoResArpInfo->tableSize; i++)
            if (memcmp(mac, params->p_AutoResArpInfo->p_AutoResTable[i].mac, 6))
                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: Only 1 mac address is currently supported."));
    }
    if (params->p_AutoResEchoIpv4Info && params->p_AutoResEchoIpv4Info->tableSize)
    {
        i = 0;
        if (!macInit)
        {
            memcpy(mac, params->p_AutoResEchoIpv4Info->p_AutoResTable[0].mac, 6);
            i = 1;
            macInit = TRUE;
        }
        for (; i < params->p_AutoResEchoIpv4Info->tableSize; i++)
            if (memcmp(mac, params->p_AutoResEchoIpv4Info->p_AutoResTable[i].mac, 6))
                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: Only 1 mac address is currently supported."));
    }
    if (params->p_AutoResEchoIpv6Info && params->p_AutoResEchoIpv6Info->tableSize)
    {
        i = 0;
        if (!macInit)
        {
            memcpy(mac, params->p_AutoResEchoIpv6Info->p_AutoResTable[0].mac, 6);
            i = 1;
            macInit = TRUE;
        }
        for (; i < params->p_AutoResEchoIpv6Info->tableSize; i++)
            if (memcmp(mac, params->p_AutoResEchoIpv6Info->p_AutoResTable[i].mac, 6))
                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: Only 1 mac address is currently supported."));
    }
    if (params->p_AutoResNdpInfo && params->p_AutoResNdpInfo->tableSizeAssigned)
    {
        i = 0;
        if (!macInit)
        {
            memcpy(mac, params->p_AutoResNdpInfo->p_AutoResTableAssigned[0].mac, 6);
            i = 1;
            macInit = TRUE;
        }
        for (; i < params->p_AutoResNdpInfo->tableSizeAssigned; i++)
            if (memcmp(mac, params->p_AutoResNdpInfo->p_AutoResTableAssigned[i].mac, 6))
                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: Only 1 mac address is currently supported."));
    }
    if (params->p_AutoResNdpInfo && params->p_AutoResNdpInfo->tableSizeTmp)
    {
        i = 0;
        if (!macInit)
        {
            memcpy(mac, params->p_AutoResNdpInfo->p_AutoResTableTmp[0].mac, 6);
            i = 1;
        }
        for (; i < params->p_AutoResNdpInfo->tableSizeTmp; i++)
            if (memcmp(mac, params->p_AutoResNdpInfo->p_AutoResTableTmp[i].mac, 6))
                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("DSAR: Only 1 mac address is currently supported."));
    }
    return E_OK;
}

static int GetBERLen(uint8_t* buf)
{
    if (*buf & 0x80)
    {
        if ((*buf & 0x7F) == 1)
            return buf[1];
        else
            return *(uint16_t*)&buf[1]; // assuming max len is 2
    }
    else
        return buf[0];
}
#define TOTAL_BER_LEN(len) (len < 128) ?
len + 2 : len + 3 #ifdef TODO_SOC_SUSPEND // XXX #define SCFG_FMCLKDPSLPCR_ADDR 0xFFE0FC00C #define SCFG_FMCLKDPSLPCR_DS_VAL 0x08402000 #define SCFG_FMCLKDPSLPCR_NORMAL_VAL 0x00402000 static int fm_soc_suspend(void) { uint32_t *fmclk, tmp32; fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4); tmp32 = GET_UINT32(*fmclk); WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_DS_VAL); tmp32 = GET_UINT32(*fmclk); iounmap(fmclk); return 0; } void fm_clk_down(void) { uint32_t *fmclk, tmp32; fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4); tmp32 = GET_UINT32(*fmclk); WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_DS_VAL | 0x40000000); tmp32 = GET_UINT32(*fmclk); iounmap(fmclk); } #endif #if 0 t_Error FM_PORT_EnterDsar(t_Handle h_FmPortRx, t_FmPortDsarParams *params) { int i, j; t_Error err; uint32_t nia; t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx; t_FmPort *p_FmPortTx = (t_FmPort *)params->h_FmPortTx; t_DsarArpDescriptor *ArpDescriptor; t_DsarIcmpV4Descriptor* ICMPV4Descriptor; t_DsarIcmpV6Descriptor* ICMPV6Descriptor; t_DsarNdDescriptor* NDDescriptor; uint64_t fmMuramVirtBaseAddr = (uint64_t)PTR_TO_UINT(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr)); uint32_t *param_page = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr)); t_ArCommonDesc *ArCommonDescPtr = (t_ArCommonDesc*)(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(*param_page))); struct arOffsets* of; uint8_t tmp = 0; t_FmGetSetParams fmGetSetParams; memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP; fmGetSetParams.setParams.sleep = 1; err = DsarCheckParams(params, p_FmPort->deepSleepVars.autoResMaxSizes); if (err != E_OK) return err; p_FmPort->deepSleepVars.autoResOffsets = XX_Malloc(sizeof(struct arOffsets)); of = (struct arOffsets *)p_FmPort->deepSleepVars.autoResOffsets; IOMemSet32(ArCommonDescPtr, 0, AR_ComputeOffsets(of, params, p_FmPort)); // common WRITE_UINT8(ArCommonDescPtr->arTxPort, p_FmPortTx->hardwarePortId); nia = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne); // bmi nia if ((nia & 0x007C0000) == 0x00440000) // bmi nia is parser WRITE_UINT32(ArCommonDescPtr->activeHPNIA, GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne)); else WRITE_UINT32(ArCommonDescPtr->activeHPNIA, nia); WRITE_UINT16(ArCommonDescPtr->snmpPort, 161); // ARP if (params->p_AutoResArpInfo) { t_DsarArpBindingEntry* arp_bindings; ArpDescriptor = (t_DsarArpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->arp); WRITE_UINT32(ArCommonDescPtr->p_ArpDescriptor, PTR_TO_UINT(ArpDescriptor) - fmMuramVirtBaseAddr); arp_bindings = (t_DsarArpBindingEntry*)(PTR_TO_UINT(ArpDescriptor) + sizeof(t_DsarArpDescriptor)); if (params->p_AutoResArpInfo->enableConflictDetection) WRITE_UINT16(ArpDescriptor->control, 1); else WRITE_UINT16(ArpDescriptor->control, 0); if (params->p_AutoResArpInfo->tableSize) { t_FmPortDsarArpEntry* arp_entry = params->p_AutoResArpInfo->p_AutoResTable; WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&arp_entry[0].mac[0]); WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&arp_entry[0].mac[2]); WRITE_UINT16(ArpDescriptor->numOfBindings, params->p_AutoResArpInfo->tableSize); for (i = 0; i < params->p_AutoResArpInfo->tableSize; i++) { WRITE_UINT32(arp_bindings[i].ipv4Addr, arp_entry[i].ipAddress); if (arp_entry[i].isVlan) WRITE_UINT16(arp_bindings[i].vlanId, arp_entry[i].vid & 0xFFF); } WRITE_UINT32(ArpDescriptor->p_Bindings, PTR_TO_UINT(arp_bindings) - fmMuramVirtBaseAddr); } 
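Every pointer-valued field written into the common descriptor (p_Bindings here, and p_Statistics, p_Ipv4AddrTbl, and the like below) is stored as an offset from the MURAM virtual base rather than as a CPU pointer, since offsets are the only addressing the auto-response ucode understands. A minimal sketch of that round-trip; muram_base and the two helpers are hypothetical stand-ins for the PTR_TO_UINT(x) - fmMuramVirtBaseAddr arithmetic used throughout:

#include <assert.h>
#include <stdint.h>

/* Hypothetical helpers mirroring the driver's offset arithmetic. */
static uint32_t to_muram_offset(uintptr_t muram_base, const void *p)
{
    return (uint32_t)((uintptr_t)p - muram_base); /* what the ucode sees */
}

static void *from_muram_offset(uintptr_t muram_base, uint32_t off)
{
    return (void *)(muram_base + off);            /* back to a CPU pointer */
}

int main(void)
{
    uintptr_t base = 0x100000;                    /* pretend MURAM base */
    void *bindings = (void *)(base + 0x240);      /* pretend binding table */
    uint32_t off = to_muram_offset(base, bindings);

    assert(from_muram_offset(base, off) == bindings);
    return 0;
}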
WRITE_UINT32(ArpDescriptor->p_Statistics, PTR_TO_UINT(arp_bindings) + sizeof(t_DsarArpBindingEntry) * params->p_AutoResArpInfo->tableSize - fmMuramVirtBaseAddr); } // ICMPV4 if (params->p_AutoResEchoIpv4Info) { t_DsarIcmpV4BindingEntry* icmpv4_bindings; ICMPV4Descriptor = (t_DsarIcmpV4Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv4); WRITE_UINT32(ArCommonDescPtr->p_IcmpV4Descriptor, PTR_TO_UINT(ICMPV4Descriptor) - fmMuramVirtBaseAddr); icmpv4_bindings = (t_DsarIcmpV4BindingEntry*)(PTR_TO_UINT(ICMPV4Descriptor) + sizeof(t_DsarIcmpV4Descriptor)); WRITE_UINT16(ICMPV4Descriptor->control, 0); if (params->p_AutoResEchoIpv4Info->tableSize) { t_FmPortDsarArpEntry* arp_entry = params->p_AutoResEchoIpv4Info->p_AutoResTable; WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&arp_entry[0].mac[0]); WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&arp_entry[0].mac[2]); WRITE_UINT16(ICMPV4Descriptor->numOfBindings, params->p_AutoResEchoIpv4Info->tableSize); for (i = 0; i < params->p_AutoResEchoIpv4Info->tableSize; i++) { WRITE_UINT32(icmpv4_bindings[i].ipv4Addr, arp_entry[i].ipAddress); if (arp_entry[i].isVlan) WRITE_UINT16(icmpv4_bindings[i].vlanId, arp_entry[i].vid & 0xFFF); } WRITE_UINT32(ICMPV4Descriptor->p_Bindings, PTR_TO_UINT(icmpv4_bindings) - fmMuramVirtBaseAddr); } WRITE_UINT32(ICMPV4Descriptor->p_Statistics, PTR_TO_UINT(icmpv4_bindings) + sizeof(t_DsarIcmpV4BindingEntry) * params->p_AutoResEchoIpv4Info->tableSize - fmMuramVirtBaseAddr); } // ICMPV6 if (params->p_AutoResEchoIpv6Info) { t_DsarIcmpV6BindingEntry* icmpv6_bindings; ICMPV6Descriptor = (t_DsarIcmpV6Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv6); WRITE_UINT32(ArCommonDescPtr->p_IcmpV6Descriptor, PTR_TO_UINT(ICMPV6Descriptor) - fmMuramVirtBaseAddr); icmpv6_bindings = (t_DsarIcmpV6BindingEntry*)(PTR_TO_UINT(ICMPV6Descriptor) + sizeof(t_DsarIcmpV6Descriptor)); WRITE_UINT16(ICMPV6Descriptor->control, 0); if (params->p_AutoResEchoIpv6Info->tableSize) { t_FmPortDsarNdpEntry* ndp_entry = params->p_AutoResEchoIpv6Info->p_AutoResTable; WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&ndp_entry[0].mac[0]); WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&ndp_entry[0].mac[2]); WRITE_UINT16(ICMPV6Descriptor->numOfBindings, params->p_AutoResEchoIpv6Info->tableSize); for (i = 0; i < params->p_AutoResEchoIpv6Info->tableSize; i++) { for (j = 0; j < 4; j++) WRITE_UINT32(icmpv6_bindings[i].ipv6Addr[j], ndp_entry[i].ipAddress[j]); if (ndp_entry[i].isVlan) WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan } WRITE_UINT32(ICMPV6Descriptor->p_Bindings, PTR_TO_UINT(icmpv6_bindings) - fmMuramVirtBaseAddr); } WRITE_UINT32(ICMPV6Descriptor->p_Statistics, PTR_TO_UINT(icmpv6_bindings) + sizeof(t_DsarIcmpV6BindingEntry) * params->p_AutoResEchoIpv6Info->tableSize - fmMuramVirtBaseAddr); } // ND if (params->p_AutoResNdpInfo) { t_DsarIcmpV6BindingEntry* icmpv6_bindings; NDDescriptor = (t_DsarNdDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->nd); WRITE_UINT32(ArCommonDescPtr->p_NdDescriptor, PTR_TO_UINT(NDDescriptor) - fmMuramVirtBaseAddr); icmpv6_bindings = (t_DsarIcmpV6BindingEntry*)(PTR_TO_UINT(NDDescriptor) + sizeof(t_DsarNdDescriptor)); if (params->p_AutoResNdpInfo->enableConflictDetection) WRITE_UINT16(NDDescriptor->control, 1); else WRITE_UINT16(NDDescriptor->control, 0); if (params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp) { t_FmPortDsarNdpEntry* ndp_entry = 
params->p_AutoResNdpInfo->p_AutoResTableAssigned; WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&ndp_entry[0].mac[0]); WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&ndp_entry[0].mac[2]); WRITE_UINT16(NDDescriptor->numOfBindings, params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp); for (i = 0; i < params->p_AutoResNdpInfo->tableSizeAssigned; i++) { for (j = 0; j < 4; j++) WRITE_UINT32(icmpv6_bindings[i].ipv6Addr[j], ndp_entry[i].ipAddress[j]); if (ndp_entry[i].isVlan) WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan } ndp_entry = params->p_AutoResNdpInfo->p_AutoResTableTmp; for (i = 0; i < params->p_AutoResNdpInfo->tableSizeTmp; i++) { for (j = 0; j < 4; j++) WRITE_UINT32(icmpv6_bindings[i + params->p_AutoResNdpInfo->tableSizeAssigned].ipv6Addr[j], ndp_entry[i].ipAddress[j]); if (ndp_entry[i].isVlan) WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i + params->p_AutoResNdpInfo->tableSizeAssigned].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan } WRITE_UINT32(NDDescriptor->p_Bindings, PTR_TO_UINT(icmpv6_bindings) - fmMuramVirtBaseAddr); } WRITE_UINT32(NDDescriptor->p_Statistics, PTR_TO_UINT(icmpv6_bindings) + sizeof(t_DsarIcmpV6BindingEntry) * (params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp) - fmMuramVirtBaseAddr); WRITE_UINT32(NDDescriptor->solicitedAddr, 0xFFFFFFFF); } // SNMP if (params->p_AutoResSnmpInfo) { t_FmPortDsarSnmpInfo *snmpSrc = params->p_AutoResSnmpInfo; t_DsarSnmpIpv4AddrTblEntry* snmpIpv4Addr; t_DsarSnmpIpv6AddrTblEntry* snmpIpv6Addr; t_OidsTblEntry* snmpOid; uint8_t *charPointer; int len; t_DsarSnmpDescriptor* SnmpDescriptor = (t_DsarSnmpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->snmp); WRITE_UINT32(ArCommonDescPtr->p_SnmpDescriptor, PTR_TO_UINT(SnmpDescriptor) - fmMuramVirtBaseAddr); WRITE_UINT16(SnmpDescriptor->control, snmpSrc->control); WRITE_UINT16(SnmpDescriptor->maxSnmpMsgLength, snmpSrc->maxSnmpMsgLength); snmpIpv4Addr = (t_DsarSnmpIpv4AddrTblEntry*)(PTR_TO_UINT(SnmpDescriptor) + sizeof(t_DsarSnmpDescriptor)); if (snmpSrc->numOfIpv4Addresses) { t_FmPortDsarSnmpIpv4AddrTblEntry* snmpIpv4AddrSrc = snmpSrc->p_Ipv4AddrTbl; WRITE_UINT16(SnmpDescriptor->numOfIpv4Addresses, snmpSrc->numOfIpv4Addresses); for (i = 0; i < snmpSrc->numOfIpv4Addresses; i++) { WRITE_UINT32(snmpIpv4Addr[i].ipv4Addr, snmpIpv4AddrSrc[i].ipv4Addr); if (snmpIpv4AddrSrc[i].isVlan) WRITE_UINT16(snmpIpv4Addr[i].vlanId, snmpIpv4AddrSrc[i].vid & 0xFFF); } WRITE_UINT32(SnmpDescriptor->p_Ipv4AddrTbl, PTR_TO_UINT(snmpIpv4Addr) - fmMuramVirtBaseAddr); } snmpIpv6Addr = (t_DsarSnmpIpv6AddrTblEntry*)(PTR_TO_UINT(snmpIpv4Addr) + sizeof(t_DsarSnmpIpv4AddrTblEntry) * snmpSrc->numOfIpv4Addresses); if (snmpSrc->numOfIpv6Addresses) { t_FmPortDsarSnmpIpv6AddrTblEntry* snmpIpv6AddrSrc = snmpSrc->p_Ipv6AddrTbl; WRITE_UINT16(SnmpDescriptor->numOfIpv6Addresses, snmpSrc->numOfIpv6Addresses); for (i = 0; i < snmpSrc->numOfIpv6Addresses; i++) { for (j = 0; j < 4; j++) WRITE_UINT32(snmpIpv6Addr[i].ipv6Addr[j], snmpIpv6AddrSrc[i].ipv6Addr[j]); if (snmpIpv6AddrSrc[i].isVlan) WRITE_UINT16(snmpIpv6Addr[i].vlanId, snmpIpv6AddrSrc[i].vid & 0xFFF); } WRITE_UINT32(SnmpDescriptor->p_Ipv6AddrTbl, PTR_TO_UINT(snmpIpv6Addr) - fmMuramVirtBaseAddr); } snmpOid = (t_OidsTblEntry*)(PTR_TO_UINT(snmpIpv6Addr) + sizeof(t_DsarSnmpIpv6AddrTblEntry) * snmpSrc->numOfIpv6Addresses); charPointer = (uint8_t*)(PTR_TO_UINT(snmpOid) + sizeof(t_OidsTblEntry) * 
snmpSrc->oidsTblSize); len = TOTAL_BER_LEN(GetBERLen(&snmpSrc->p_RdOnlyCommunityStr[1])); Mem2IOCpy32(charPointer, snmpSrc->p_RdOnlyCommunityStr, len); WRITE_UINT32(SnmpDescriptor->p_RdOnlyCommunityStr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr); charPointer += len; len = TOTAL_BER_LEN(GetBERLen(&snmpSrc->p_RdWrCommunityStr[1])); Mem2IOCpy32(charPointer, snmpSrc->p_RdWrCommunityStr, len); WRITE_UINT32(SnmpDescriptor->p_RdWrCommunityStr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr); charPointer += len; WRITE_UINT32(SnmpDescriptor->oidsTblSize, snmpSrc->oidsTblSize); WRITE_UINT32(SnmpDescriptor->p_OidsTbl, PTR_TO_UINT(snmpOid) - fmMuramVirtBaseAddr); for (i = 0; i < snmpSrc->oidsTblSize; i++) { WRITE_UINT16(snmpOid->oidSize, snmpSrc->p_OidsTbl[i].oidSize); WRITE_UINT16(snmpOid->resSize, snmpSrc->p_OidsTbl[i].resSize); Mem2IOCpy32(charPointer, snmpSrc->p_OidsTbl[i].oidVal, snmpSrc->p_OidsTbl[i].oidSize); WRITE_UINT32(snmpOid->p_Oid, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr); charPointer += snmpSrc->p_OidsTbl[i].oidSize; if (snmpSrc->p_OidsTbl[i].resSize <= 4) WRITE_UINT32(snmpOid->resValOrPtr, *snmpSrc->p_OidsTbl[i].resVal); else { Mem2IOCpy32(charPointer, snmpSrc->p_OidsTbl[i].resVal, snmpSrc->p_OidsTbl[i].resSize); WRITE_UINT32(snmpOid->resValOrPtr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr); charPointer += snmpSrc->p_OidsTbl[i].resSize; } snmpOid++; } charPointer = UINT_TO_PTR(ROUND_UP(PTR_TO_UINT(charPointer),4)); WRITE_UINT32(SnmpDescriptor->p_Statistics, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr); } // filtering if (params->p_AutoResFilteringInfo) { if (params->p_AutoResFilteringInfo->ipProtPassOnHit) tmp |= IP_PROT_TBL_PASS_MASK; if (params->p_AutoResFilteringInfo->udpPortPassOnHit) tmp |= UDP_PORT_TBL_PASS_MASK; if (params->p_AutoResFilteringInfo->tcpPortPassOnHit) tmp |= TCP_PORT_TBL_PASS_MASK; WRITE_UINT8(ArCommonDescPtr->filterControl, tmp); WRITE_UINT16(ArCommonDescPtr->tcpControlPass, params->p_AutoResFilteringInfo->tcpFlagsMask); // ip filtering if (params->p_AutoResFilteringInfo->ipProtTableSize) { uint8_t* ip_tbl = (uint8_t*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtIp); WRITE_UINT8(ArCommonDescPtr->ipProtocolTblSize, params->p_AutoResFilteringInfo->ipProtTableSize); for (i = 0; i < params->p_AutoResFilteringInfo->ipProtTableSize; i++) WRITE_UINT8(ip_tbl[i], params->p_AutoResFilteringInfo->p_IpProtTablePtr[i]); WRITE_UINT32(ArCommonDescPtr->p_IpProtocolFiltTbl, PTR_TO_UINT(ip_tbl) - fmMuramVirtBaseAddr); } // udp filtering if (params->p_AutoResFilteringInfo->udpPortsTableSize) { t_PortTblEntry* udp_tbl = (t_PortTblEntry*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtUdp); WRITE_UINT8(ArCommonDescPtr->udpPortTblSize, params->p_AutoResFilteringInfo->udpPortsTableSize); for (i = 0; i < params->p_AutoResFilteringInfo->udpPortsTableSize; i++) { WRITE_UINT32(udp_tbl[i].Ports, (params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].srcPort << 16) + params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].dstPort); WRITE_UINT32(udp_tbl[i].PortsMask, (params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].srcPortMask << 16) + params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].dstPortMask); } WRITE_UINT32(ArCommonDescPtr->p_UdpPortFiltTbl, PTR_TO_UINT(udp_tbl) - fmMuramVirtBaseAddr); } // tcp filtering if (params->p_AutoResFilteringInfo->tcpPortsTableSize) { t_PortTblEntry* tcp_tbl = (t_PortTblEntry*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtTcp); WRITE_UINT8(ArCommonDescPtr->tcpPortTblSize, params->p_AutoResFilteringInfo->tcpPortsTableSize); for (i = 0; i < 
params->p_AutoResFilteringInfo->tcpPortsTableSize; i++) { WRITE_UINT32(tcp_tbl[i].Ports, (params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].srcPort << 16) + params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].dstPort); WRITE_UINT32(tcp_tbl[i].PortsMask, (params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].srcPortMask << 16) + params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].dstPortMask); } WRITE_UINT32(ArCommonDescPtr->p_TcpPortFiltTbl, PTR_TO_UINT(tcp_tbl) - fmMuramVirtBaseAddr); } } // common stats WRITE_UINT32(ArCommonDescPtr->p_ArStats, PTR_TO_UINT(ArCommonDescPtr) + of->stats - fmMuramVirtBaseAddr); // get into Deep Sleep sequence: // Ensures that FMan do not enter the idle state. This is done by programing // FMDPSLPCR[FM_STOP] to one. fm_soc_suspend(); ARDesc = UINT_TO_PTR(XX_VirtToPhys(ArCommonDescPtr)); return E_OK; } void FM_ChangeClock(t_Handle h_Fm, int hardwarePortId); t_Error FM_PORT_EnterDsarFinal(t_Handle h_DsarRxPort, t_Handle h_DsarTxPort) { t_FmGetSetParams fmGetSetParams; t_FmPort *p_FmPort = (t_FmPort *)h_DsarRxPort; t_FmPort *p_FmPortTx = (t_FmPort *)h_DsarTxPort; t_Handle *h_FmPcd = FmGetPcd(p_FmPort->h_Fm); t_FmPort *p_FmPortHc = FM_PCD_GetHcPort(h_FmPcd); memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.setParams.type = UPDATE_FM_CLD; FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); /* Issue graceful stop to HC port */ FM_PORT_Disable(p_FmPortHc); // config tx port p_FmPort->deepSleepVars.fmbm_tcfg = GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg); WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg, GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg) | BMI_PORT_CFG_IM | BMI_PORT_CFG_EN); // ???? p_FmPort->deepSleepVars.fmbm_tcmne = GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne); WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne, 0xE); // Stage 7:echo p_FmPort->deepSleepVars.fmbm_rfpne = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne); WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne, 0x2E); if (!PrsIsEnabled(h_FmPcd)) { p_FmPort->deepSleepVars.dsarEnabledParser = TRUE; PrsEnable(h_FmPcd); } else p_FmPort->deepSleepVars.dsarEnabledParser = FALSE; p_FmPort->deepSleepVars.fmbm_rfne = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne); WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne, 0x440000); // save rcfg for restoring: accumulate mode is changed by ucode p_FmPort->deepSleepVars.fmbm_rcfg = GET_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcfg); WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcfg, p_FmPort->deepSleepVars.fmbm_rcfg | BMI_PORT_CFG_AM); memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP; fmGetSetParams.setParams.sleep = 1; FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); // ***** issue external request sync command memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.setParams.type = UPDATE_FPM_EXTC; FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); // get memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.getParams.type = GET_FMFP_EXTC; FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); if (fmGetSetParams.getParams.fmfp_extc != 0) { // clear memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.setParams.type = UPDATE_FPM_EXTC_CLEAR; FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); } memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.getParams.type = GET_FMFP_EXTC 
| GET_FM_NPI; do { FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); } while (fmGetSetParams.getParams.fmfp_extc != 0 && fmGetSetParams.getParams.fm_npi == 0); if (fmGetSetParams.getParams.fm_npi != 0) XX_Print("FM: Sync did not finish\n"); // check that all stoped memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.getParams.type = GET_FMQM_GS | GET_FM_NPI; FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); while (fmGetSetParams.getParams.fmqm_gs & 0xF0000000) FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); if (fmGetSetParams.getParams.fmqm_gs == 0 && fmGetSetParams.getParams.fm_npi == 0) XX_Print("FM: Sleeping\n"); // FM_ChangeClock(p_FmPort->h_Fm, p_FmPort->hardwarePortId); return E_OK; } void FM_PORT_Dsar_DumpRegs() { uint32_t* hh = XX_PhysToVirt(PTR_TO_UINT(ARDesc)); DUMP_MEMORY(hh, 0x220); } void FM_PORT_ExitDsar(t_Handle h_FmPortRx, t_Handle h_FmPortTx) { t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx; t_FmPort *p_FmPortTx = (t_FmPort *)h_FmPortTx; t_Handle *h_FmPcd = FmGetPcd(p_FmPort->h_Fm); t_FmPort *p_FmPortHc = FM_PCD_GetHcPort(h_FmPcd); t_FmGetSetParams fmGetSetParams; memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams)); fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP; fmGetSetParams.setParams.sleep = 0; if (p_FmPort->deepSleepVars.autoResOffsets) { XX_Free(p_FmPort->deepSleepVars.autoResOffsets); p_FmPort->deepSleepVars.autoResOffsets = 0; } if (p_FmPort->deepSleepVars.dsarEnabledParser) PrsDisable(FmGetPcd(p_FmPort->h_Fm)); WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne, p_FmPort->deepSleepVars.fmbm_rfpne); WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne, p_FmPort->deepSleepVars.fmbm_rfne); WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rcfg, p_FmPort->deepSleepVars.fmbm_rcfg); FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams); WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne, p_FmPort->deepSleepVars.fmbm_tcmne); WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg, p_FmPort->deepSleepVars.fmbm_tcfg); FM_PORT_Enable(p_FmPortHc); } bool FM_PORT_IsInDsar(t_Handle h_FmPort) { t_FmPort *p_FmPort = (t_FmPort *)h_FmPort; return PTR_TO_UINT(p_FmPort->deepSleepVars.autoResOffsets); } t_Error FM_PORT_GetDsarStats(t_Handle h_FmPortRx, t_FmPortDsarStats *stats) { t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx; struct arOffsets *of = (struct arOffsets*)p_FmPort->deepSleepVars.autoResOffsets; uint8_t* fmMuramVirtBaseAddr = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr); uint32_t *param_page = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr)); t_ArCommonDesc *ArCommonDescPtr = (t_ArCommonDesc*)(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(*param_page))); t_DsarArpDescriptor *ArpDescriptor = (t_DsarArpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->arp); t_DsarArpStatistics* arp_stats = (t_DsarArpStatistics*)(PTR_TO_UINT(ArpDescriptor->p_Statistics) + fmMuramVirtBaseAddr); t_DsarIcmpV4Descriptor* ICMPV4Descriptor = (t_DsarIcmpV4Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv4); t_DsarIcmpV4Statistics* icmpv4_stats = (t_DsarIcmpV4Statistics*)(PTR_TO_UINT(ICMPV4Descriptor->p_Statistics) + fmMuramVirtBaseAddr); t_DsarNdDescriptor* NDDescriptor = (t_DsarNdDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->nd); t_NdStatistics* nd_stats = (t_NdStatistics*)(PTR_TO_UINT(NDDescriptor->p_Statistics) + fmMuramVirtBaseAddr); t_DsarIcmpV6Descriptor* ICMPV6Descriptor = (t_DsarIcmpV6Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv6); 
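The sleep-entry sequence earlier in this hunk waits on hardware status bits (GET_FMFP_EXTC, GET_FM_NPI, GET_FMQM_GS) with open-ended busy-wait loops; if the sync never completes, they spin forever. A bounded variant is sketched below; poll_with_timeout, done, and udelay are illustrative stand-ins, not driver API (the driver's own delay primitive is XX_UDelay):

#include <stdbool.h>
#include <stdint.h>

typedef bool (*poll_done_fn)(void *ctx);
typedef void (*udelay_fn)(uint32_t usec);

/* Poll until done(ctx) holds, re-checking at most max_iters times. */
static bool poll_with_timeout(poll_done_fn done, void *ctx,
                              uint32_t max_iters, uint32_t delay_us,
                              udelay_fn udelay)
{
    uint32_t i;

    for (i = 0; i < max_iters; i++)
    {
        if (done(ctx))
            return true;   /* condition met, e.g. fmqm_gs quiesced */
        udelay(delay_us);  /* back off before re-reading the register */
    }
    return false;          /* caller can then report "Sync did not finish" */
}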
t_DsarIcmpV6Statistics* icmpv6_stats = (t_DsarIcmpV6Statistics*)(PTR_TO_UINT(ICMPV6Descriptor->p_Statistics) + fmMuramVirtBaseAddr); t_DsarSnmpDescriptor* SnmpDescriptor = (t_DsarSnmpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->snmp); t_DsarSnmpStatistics* snmp_stats = (t_DsarSnmpStatistics*)(PTR_TO_UINT(SnmpDescriptor->p_Statistics) + fmMuramVirtBaseAddr); stats->arpArCnt = arp_stats->arCnt; stats->echoIcmpv4ArCnt = icmpv4_stats->arCnt; stats->ndpArCnt = nd_stats->arCnt; stats->echoIcmpv6ArCnt = icmpv6_stats->arCnt; stats->snmpGetCnt = snmp_stats->snmpGetReqCnt; stats->snmpGetNextCnt = snmp_stats->snmpGetNextReqCnt; return E_OK; } #endif Index: projects/runtime-coverage/sys/contrib/ncsw/Peripherals/QM/qm_portal_fqr.c =================================================================== --- projects/runtime-coverage/sys/contrib/ncsw/Peripherals/QM/qm_portal_fqr.c (revision 325209) +++ projects/runtime-coverage/sys/contrib/ncsw/Peripherals/QM/qm_portal_fqr.c (revision 325210) @@ -1,2735 +1,2735 @@ /****************************************************************************** © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc. All rights reserved. This is proprietary source code of Freescale Semiconductor Inc., and its use is subject to the NetComm Device Drivers EULA. The copyright notice above does not evidence any actual or intended publication of such source code. ALTERNATIVELY, redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Freescale Semiconductor nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************/ /****************************************************************************** @File qm.c @Description QM & Portal implementation *//***************************************************************************/ #include #include #include #include "error_ext.h" #include "std_ext.h" #include "string_ext.h" #include "mm_ext.h" #include "qm.h" #include "qman_low.h" #include /****************************************/ /* static functions */ /****************************************/ #define SLOW_POLL_IDLE 1000 #define SLOW_POLL_BUSY 10 /* * Context entries are 32-bit. 
The qman driver uses the pointer to the queue as
 * its context, and the pointer is 64-byte aligned, per the XX_MallocSmart()
 * call. Take advantage of this fact to shove a 64-bit kernel pointer into a
 * 32-bit context integer, and back.
 *
 * XXX: This depends on the fact that VM_MAX_KERNEL_ADDRESS is less than a
 * 35-bit span above VM_MIN_KERNEL_ADDRESS, which the CTASSERT below
 * enforces. If this ever changes, this needs to be updated.
 */
CTASSERT((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) < (1ULL << 35));

static inline uint32_t
aligned_int_from_ptr(const void *p)
{
	uintptr_t ctx;

	ctx = (uintptr_t)p;
	KASSERT(ctx >= VM_MIN_KERNEL_ADDRESS, ("%p is too low!\n", p));
	ctx -= VM_MIN_KERNEL_ADDRESS;
	KASSERT((ctx & 0x07) == 0, ("Pointer %p is not 8-byte aligned!\n", p));

-	if ((ctx & (0x7)) != 0)
-		return (0);
+	return (ctx >> 3);
}

static inline void *
ptr_from_aligned_int(uint32_t ctx)
{
	uintptr_t p;

-	p = VM_MIN_KERNEL_ADDRESS + (ctx << 3);
+	p = ctx;
+	p = VM_MIN_KERNEL_ADDRESS + (p << 3);
	return ((void *)p);
}

static t_Error qman_volatile_dequeue(t_QmPortal *p_QmPortal,
                                     struct qman_fq *p_Fq,
                                     uint32_t vdqcr)
{
    ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
                (p_Fq->state == qman_fq_state_retired));
    ASSERT_COND(!(vdqcr & QM_VDQCR_FQID_MASK));
    ASSERT_COND(!(p_Fq->flags & QMAN_FQ_STATE_VDQCR));

    vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | p_Fq->fqid;
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    p_Fq->flags |= QMAN_FQ_STATE_VDQCR;
    qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, vdqcr);
    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);

    return E_OK;
}

static const char *mcr_result_str(uint8_t result)
{
    switch (result) {
        case QM_MCR_RESULT_NULL:
            return "QM_MCR_RESULT_NULL";
        case QM_MCR_RESULT_OK:
            return "QM_MCR_RESULT_OK";
        case QM_MCR_RESULT_ERR_FQID:
            return "QM_MCR_RESULT_ERR_FQID";
        case QM_MCR_RESULT_ERR_FQSTATE:
            return "QM_MCR_RESULT_ERR_FQSTATE";
        case QM_MCR_RESULT_ERR_NOTEMPTY:
            return "QM_MCR_RESULT_ERR_NOTEMPTY";
        case QM_MCR_RESULT_PENDING:
            return "QM_MCR_RESULT_PENDING";
    }
    return "";
}

static t_Error qman_create_fq(t_QmPortal *p_QmPortal,
                              uint32_t fqid,
                              uint32_t flags,
                              struct qman_fq *p_Fq)
{
    struct qm_fqd fqd;
    struct qm_mcr_queryfq_np np;
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result *p_Mcr;

    p_Fq->fqid = fqid;
    p_Fq->flags = flags;
    p_Fq->state = qman_fq_state_oos;
    p_Fq->cgr_groupid = 0;
    if (!(flags & QMAN_FQ_FLAG_RECOVER) ||
        (flags & QMAN_FQ_FLAG_NO_MODIFY))
        return E_OK;
    /* Everything else is RECOVER support */
    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->queryfq.fqid = fqid;
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
    if (p_Mcr->result != QM_MCR_RESULT_OK) {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QUERYFQ failed: %s",
                     mcr_result_str(p_Mcr->result)));
    }
    fqd = p_Mcr->queryfq.fqd;
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->queryfq_np.fqid = fqid;
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
    if (p_Mcr->result != QM_MCR_RESULT_OK) {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QUERYFQ_NP failed: %s",
                     mcr_result_str(p_Mcr->result)));
    }
    np = p_Mcr->queryfq_np;
    /* Phew, have queryfq and queryfq_np results, stitch together
     * the FQ object from those.
*/ p_Fq->cgr_groupid = fqd.cgid; switch (np.state & QM_MCR_NP_STATE_MASK) { case QM_MCR_NP_STATE_OOS: break; case QM_MCR_NP_STATE_RETIRED: p_Fq->state = qman_fq_state_retired; if (np.frm_cnt) p_Fq->flags |= QMAN_FQ_STATE_NE; break; case QM_MCR_NP_STATE_TEN_SCHED: case QM_MCR_NP_STATE_TRU_SCHED: case QM_MCR_NP_STATE_ACTIVE: p_Fq->state = qman_fq_state_sched; if (np.state & QM_MCR_NP_STATE_R) p_Fq->flags |= QMAN_FQ_STATE_CHANGING; break; case QM_MCR_NP_STATE_PARKED: p_Fq->state = qman_fq_state_parked; break; default: ASSERT_COND(FALSE); } if (fqd.fq_ctrl & QM_FQCTRL_CGE) p_Fq->state |= QMAN_FQ_STATE_CGR_EN; PUNLOCK(p_QmPortal); return E_OK; } static void qman_destroy_fq(struct qman_fq *p_Fq, uint32_t flags) { /* We don't need to lock the FQ as it is a pre-condition that the FQ be * quiesced. Instead, run some checks. */ UNUSED(flags); switch (p_Fq->state) { case qman_fq_state_parked: ASSERT_COND(flags & QMAN_FQ_DESTROY_PARKED); case qman_fq_state_oos: return; default: break; } ASSERT_COND(FALSE); } static t_Error qman_init_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, uint32_t flags, struct qm_mcc_initfq *p_Opts) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; uint8_t res, myverb = (uint8_t)((flags & QMAN_INITFQ_FLAG_SCHED) ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED); SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_oos) || (p_Fq->state == qman_fq_state_parked), E_INVALID_STATE); if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY) return ERROR_CODE(E_INVALID_VALUE); /* Issue an INITFQ_[PARKED|SCHED] management command */ NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) || ((p_Fq->state != qman_fq_state_oos) && (p_Fq->state != qman_fq_state_parked))) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); Mem2IOCpy32((void*)&p_Mcc->initfq, p_Opts, sizeof(struct qm_mcc_initfq)); qm_mc_commit(p_QmPortal->p_LowQmPortal, myverb); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == myverb); res = p_Mcr->result; if (res != QM_MCR_RESULT_OK) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE,("INITFQ failed: %s", mcr_result_str(res))); } if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_FQCTRL) { if (p_Mcc->initfq.fqd.fq_ctrl & QM_FQCTRL_CGE) p_Fq->flags |= QMAN_FQ_STATE_CGR_EN; else p_Fq->flags &= ~QMAN_FQ_STATE_CGR_EN; } if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_CGID) p_Fq->cgr_groupid = p_Mcc->initfq.fqd.cgid; p_Fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
qman_fq_state_sched : qman_fq_state_parked; FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return E_OK; } static t_Error qman_retire_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, uint32_t *p_Flags, bool drain) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; t_Error err = E_OK; uint8_t res; SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_parked) || (p_Fq->state == qman_fq_state_sched), E_INVALID_STATE); if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY) return E_INVALID_VALUE; NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) || (p_Fq->state == qman_fq_state_retired) || (p_Fq->state == qman_fq_state_oos)) { err = E_BUSY; goto out; } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->alterfq.fqid = p_Fq->fqid; if (drain) p_Mcc->alterfq.context_b = aligned_int_from_ptr(p_Fq); qm_mc_commit(p_QmPortal->p_LowQmPortal, (uint8_t)((drain)?QM_MCC_VERB_ALTER_RETIRE_CTXB:QM_MCC_VERB_ALTER_RETIRE)); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == (drain)?QM_MCR_VERB_ALTER_RETIRE_CTXB:QM_MCR_VERB_ALTER_RETIRE); res = p_Mcr->result; if (res == QM_MCR_RESULT_OK) { /* Process 'fq' right away, we'll ignore FQRNI */ if (p_Mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) p_Fq->flags |= QMAN_FQ_STATE_NE; if (p_Mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) p_Fq->flags |= QMAN_FQ_STATE_ORL; p_Fq->state = qman_fq_state_retired; } else if (res == QM_MCR_RESULT_PENDING) p_Fq->flags |= QMAN_FQ_STATE_CHANGING; else { XX_Print("ALTER_RETIRE failed: %s\n", mcr_result_str(res)); err = E_INVALID_STATE; } if (p_Flags) *p_Flags = p_Fq->flags; out: FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return err; } static t_Error qman_oos_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; uint8_t res; ASSERT_COND(p_Fq->state == qman_fq_state_retired); if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY) return ERROR_CODE(E_INVALID_VALUE); NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); if ((p_Fq->flags & QMAN_FQ_STATE_BLOCKOOS) || (p_Fq->state != qman_fq_state_retired)) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->alterfq.fqid = p_Fq->fqid; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_OOS); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); res = p_Mcr->result; if (res != QM_MCR_RESULT_OK) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_OOS failed: %s\n", mcr_result_str(res))); } p_Fq->state = qman_fq_state_oos; FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return E_OK; } static t_Error qman_schedule_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; uint8_t res; ASSERT_COND(p_Fq->state == qman_fq_state_parked); if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY) return ERROR_CODE(E_INVALID_VALUE); /* Issue a ALTERFQ_SCHED management command */ NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) || (p_Fq->state != qman_fq_state_parked)) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->alterfq.fqid = p_Fq->fqid; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_SCHED); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); res = p_Mcr->result; if (res != QM_MCR_RESULT_OK) { FQUNLOCK(p_Fq); 
PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_SCHED failed: %s\n", mcr_result_str(res))); } p_Fq->state = qman_fq_state_sched; FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return E_OK; } /* Inline helper to reduce nesting in LoopMessageRing() */ static __inline__ void fq_state_change(struct qman_fq *p_Fq, struct qm_mr_entry *p_Msg, uint8_t verb) { FQLOCK(p_Fq); switch(verb) { case QM_MR_VERB_FQRL: ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_ORL); p_Fq->flags &= ~QMAN_FQ_STATE_ORL; break; case QM_MR_VERB_FQRN: ASSERT_COND((p_Fq->state == qman_fq_state_parked) || (p_Fq->state == qman_fq_state_sched)); ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING); p_Fq->flags &= ~QMAN_FQ_STATE_CHANGING; if (p_Msg->fq.fqs & QM_MR_FQS_NOTEMPTY) p_Fq->flags |= QMAN_FQ_STATE_NE; if (p_Msg->fq.fqs & QM_MR_FQS_ORLPRESENT) p_Fq->flags |= QMAN_FQ_STATE_ORL; p_Fq->state = qman_fq_state_retired; break; case QM_MR_VERB_FQPN: ASSERT_COND(p_Fq->state == qman_fq_state_sched); ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING); p_Fq->state = qman_fq_state_parked; } FQUNLOCK(p_Fq); } static t_Error freeDrainedFq(struct qman_fq *p_Fq) { t_QmFqr *p_QmFqr; uint32_t i; ASSERT_COND(p_Fq); p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr; ASSERT_COND(p_QmFqr); ASSERT_COND(!p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset]); p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset] = TRUE; p_QmFqr->numOfDrainedFqids++; if (p_QmFqr->numOfDrainedFqids == p_QmFqr->numOfFqids) { for (i=0;inumOfFqids;i++) { if ((p_QmFqr->p_Fqs[i]->state == qman_fq_state_retired) && (qman_oos_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]) != E_OK)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!")); qman_destroy_fq(p_QmFqr->p_Fqs[i], 0); XX_FreeSmart(p_QmFqr->p_Fqs[i]); } XX_Free(p_QmFqr->p_DrainedFqs); p_QmFqr->p_DrainedFqs = NULL; if (p_QmFqr->f_CompletionCB) { p_QmFqr->f_CompletionCB(p_QmFqr->h_App, p_QmFqr); XX_Free(p_QmFqr->p_Fqs); if (p_QmFqr->fqidBase) QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase); XX_Free(p_QmFqr); } } return E_OK; } static t_Error drainRetiredFq(struct qman_fq *p_Fq) { t_QmFqr *p_QmFqr; ASSERT_COND(p_Fq); p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr; ASSERT_COND(p_QmFqr); if (p_Fq->flags & QMAN_FQ_STATE_NE) { if (qman_volatile_dequeue(p_QmFqr->h_QmPortal, p_Fq, (QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_TILLEMPTY)) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("drain with volatile failed")); return E_OK; } else return freeDrainedFq(p_Fq); } static e_RxStoreResponse drainCB(t_Handle h_App, t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame) { UNUSED(h_App); UNUSED(h_QmFqr); UNUSED(h_QmPortal); UNUSED(fqidOffset); UNUSED(p_Frame); DBG(TRACE,("got fd for fqid %d", ((t_QmFqr *)h_QmFqr)->fqidBase + fqidOffset)); return e_RX_STORE_RESPONSE_CONTINUE; } static void cb_ern_dcErn(t_Handle h_App, t_Handle h_QmPortal, struct qman_fq *p_Fq, const struct qm_mr_entry *p_Msg) { static int cnt = 0; UNUSED(p_Fq); UNUSED(p_Msg); UNUSED(h_App); UNUSED(h_QmPortal); XX_Print("cb_ern_dcErn_fqs() unimplemented %d\n", ++cnt); } static void cb_fqs(t_Handle h_App, t_Handle h_QmPortal, struct qman_fq *p_Fq, const struct qm_mr_entry *p_Msg) { UNUSED(p_Msg); UNUSED(h_App); UNUSED(h_QmPortal); if (p_Fq->state == qman_fq_state_retired && !(p_Fq->flags & QMAN_FQ_STATE_ORL)) drainRetiredFq(p_Fq); } static void null_cb_mr(t_Handle h_App, t_Handle h_QmPortal, struct qman_fq *p_Fq, const struct qm_mr_entry *p_Msg) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; UNUSED(p_Fq);UNUSED(h_App); if ((p_Msg->verb & QM_MR_VERB_DC_ERN) == QM_MR_VERB_DC_ERN) 
XX_Print("Ignoring unowned MR frame on cpu %d, dc-portal 0x%02x.\n", p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->dcern.portal); else XX_Print("Ignoring unowned MR frame on cpu %d, verb 0x%02x.\n", p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->verb); } static uint32_t LoopMessageRing(t_QmPortal *p_QmPortal, uint32_t is) { struct qm_mr_entry *p_Msg; if (is & QM_PIRQ_CSCI) { struct qm_mc_result *p_Mcr; struct qman_cgrs tmp; uint32_t mask; unsigned int i, j; NCSW_PLOCK(p_QmPortal); qm_mc_start(p_QmPortal->p_LowQmPortal); qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCONGESTION); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; /* cgrs[0] is the portal mask for its cg's, cgrs[1] is the previous state of cg's */ for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++) { /* get curent state */ tmp.q.__state[i] = p_Mcr->querycongestion.state.__state[i]; /* keep only cg's that are registered for this portal */ tmp.q.__state[i] &= p_QmPortal->cgrs[0].q.__state[i]; /* handle only cg's that changed their state from previous exception */ tmp.q.__state[i] ^= p_QmPortal->cgrs[1].q.__state[i]; /* update previous */ p_QmPortal->cgrs[1].q.__state[i] = p_Mcr->querycongestion.state.__state[i]; } PUNLOCK(p_QmPortal); /* if in interrupt */ /* call the callback routines for any CG with a changed state */ for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++) for(j=0, mask = 0x80000000; j<32 ; j++, mask>>=1) { if(tmp.q.__state[i] & mask) { t_QmCg *p_QmCg = (t_QmCg *)(p_QmPortal->cgsHandles[i*32 + j]); if(p_QmCg->f_Exception) p_QmCg->f_Exception(p_QmCg->h_App, e_QM_EX_CG_STATE_CHANGE); } } } if (is & QM_PIRQ_EQRI) { NCSW_PLOCK(p_QmPortal); qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal); qm_eqcr_set_ithresh(p_QmPortal->p_LowQmPortal, 0); PUNLOCK(p_QmPortal); } if (is & QM_PIRQ_MRI) { mr_loop: qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal); if (p_Msg) { struct qman_fq *p_FqFqs = ptr_from_aligned_int(p_Msg->fq.contextB); struct qman_fq *p_FqErn = ptr_from_aligned_int(p_Msg->ern.tag); uint8_t verb =(uint8_t)(p_Msg->verb & QM_MR_VERB_TYPE_MASK); t_QmRejectedFrameInfo rejectedFrameInfo; memset(&rejectedFrameInfo, 0, sizeof(t_QmRejectedFrameInfo)); if (!(verb & QM_MR_VERB_DC_ERN)) { switch(p_Msg->ern.rc) { case(QM_MR_RC_CGR_TAILDROP): rejectedFrameInfo.rejectionCode = e_QM_RC_CG_TAILDROP; rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid; break; case(QM_MR_RC_WRED): rejectedFrameInfo.rejectionCode = e_QM_RC_CG_WRED; rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid; break; case(QM_MR_RC_FQ_TAILDROP): rejectedFrameInfo.rejectionCode = e_QM_RC_FQ_TAILDROP; rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid; break; case(QM_MR_RC_ERROR): break; default: REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("Unknown rejection code")); } if (!p_FqErn) p_QmPortal->p_NullCB->ern(p_QmPortal->h_App, NULL, p_QmPortal, 0, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo); else p_FqErn->cb.ern(p_FqErn->h_App, p_FqErn->h_QmFqr, p_QmPortal, p_FqErn->fqidOffset, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo); } else if (verb == QM_MR_VERB_DC_ERN) { if (!p_FqErn) p_QmPortal->p_NullCB->dc_ern(NULL, p_QmPortal, NULL, p_Msg); else p_FqErn->cb.dc_ern(p_FqErn->h_App, p_QmPortal, p_FqErn, p_Msg); } else { if (verb == QM_MR_VERB_FQRNI) ; /* we drop FQRNIs on the floor */ else if (!p_FqFqs) p_QmPortal->p_NullCB->fqs(NULL, p_QmPortal, NULL, p_Msg); else if ((verb == QM_MR_VERB_FQRN) || (verb == QM_MR_VERB_FQRL) || (verb == QM_MR_VERB_FQPN)) { fq_state_change(p_FqFqs, p_Msg, verb); 
p_FqFqs->cb.fqs(p_FqFqs->h_App, p_QmPortal, p_FqFqs, p_Msg); } } qm_mr_next(p_QmPortal->p_LowQmPortal); qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1); goto mr_loop; } } return is & (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI); } static void LoopDequeueRing(t_Handle h_QmPortal) { struct qm_dqrr_entry *p_Dq; struct qman_fq *p_Fq; enum qman_cb_dqrr_result res = qman_cb_dqrr_consume; e_RxStoreResponse tmpRes; t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; int prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH); while (res != qman_cb_dqrr_pause) { if (prefetch) qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal); qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); if (!p_Dq) break; p_Fq = ptr_from_aligned_int(p_Dq->contextB); if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* We only set QMAN_FQ_STATE_NE when retiring, so we only need * to check for clearing it when doing volatile dequeues. It's * one less thing to check in the critical path (SDQCR). */ tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; /* Check for VDQCR completion */ if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR; if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY) { p_Fq->flags &= ~QMAN_FQ_STATE_NE; freeDrainedFq(p_Fq); } } else { /* Interpret 'dq' from the owner's perspective. */ /* use portal default handlers */ ASSERT_COND(p_Dq->fqid); if (p_Fq) { tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; else if (p_Fq->state == qman_fq_state_waiting_parked) res = qman_cb_dqrr_park; } else { tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App, NULL, p_QmPortal, p_Dq->fqid, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; } } /* Parking isn't possible unless HELDACTIVE was set. NB, * FORCEELIGIBLE implies HELDACTIVE, so we only need to * check for HELDACTIVE to cover both. */ ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || (res != qman_cb_dqrr_park)); if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) { /* Defer just means "skip it, I'll consume it myself later on" */ if (res != qman_cb_dqrr_defer) qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal, p_Dq, (res == qman_cb_dqrr_park)); qm_dqrr_next(p_QmPortal->p_LowQmPortal); } else { if (res == qman_cb_dqrr_park) /* The only thing to do for non-DCA is the park-request */ qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal); qm_dqrr_next(p_QmPortal->p_LowQmPortal); qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1); } } } static void LoopDequeueRingDcaOptimized(t_Handle h_QmPortal) { struct qm_dqrr_entry *p_Dq; struct qman_fq *p_Fq; enum qman_cb_dqrr_result res = qman_cb_dqrr_consume; e_RxStoreResponse tmpRes; t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; while (res != qman_cb_dqrr_pause) { qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); if (!p_Dq) break; p_Fq = ptr_from_aligned_int(p_Dq->contextB); if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* We only set QMAN_FQ_STATE_NE when retiring, so we only need * to check for clearing it when doing volatile dequeues. It's * one less thing to check in the critical path (SDQCR). 
*/ tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; /* Check for VDQCR completion */ if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR; if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY) { p_Fq->flags &= ~QMAN_FQ_STATE_NE; freeDrainedFq(p_Fq); } } else { /* Interpret 'dq' from the owner's perspective. */ /* use portal default handlers */ ASSERT_COND(p_Dq->fqid); if (p_Fq) { tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; else if (p_Fq->state == qman_fq_state_waiting_parked) res = qman_cb_dqrr_park; } else { tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App, NULL, p_QmPortal, p_Dq->fqid, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; } } /* Parking isn't possible unless HELDACTIVE was set. NB, * FORCEELIGIBLE implies HELDACTIVE, so we only need to * check for HELDACTIVE to cover both. */ ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || (res != qman_cb_dqrr_park)); /* Defer just means "skip it, I'll consume it myself later on" */ if (res != qman_cb_dqrr_defer) qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal, p_Dq, (res == qman_cb_dqrr_park)); qm_dqrr_next(p_QmPortal->p_LowQmPortal); } } static void LoopDequeueRingOptimized(t_Handle h_QmPortal) { struct qm_dqrr_entry *p_Dq; struct qman_fq *p_Fq; enum qman_cb_dqrr_result res = qman_cb_dqrr_consume; e_RxStoreResponse tmpRes; t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; while (res != qman_cb_dqrr_pause) { qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); if (!p_Dq) break; p_Fq = ptr_from_aligned_int(p_Dq->contextB); if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* We only set QMAN_FQ_STATE_NE when retiring, so we only need * to check for clearing it when doing volatile dequeues. It's * one less thing to check in the critical path (SDQCR). */ tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; /* Check for VDQCR completion */ if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR; if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY) { p_Fq->flags &= ~QMAN_FQ_STATE_NE; freeDrainedFq(p_Fq); } } else { /* Interpret 'dq' from the owner's perspective. */ /* use portal default handlers */ ASSERT_COND(p_Dq->fqid); if (p_Fq) { tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; else if (p_Fq->state == qman_fq_state_waiting_parked) res = qman_cb_dqrr_park; } else { tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App, NULL, p_QmPortal, p_Dq->fqid, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; } } /* Parking isn't possible unless HELDACTIVE was set. NB, * FORCEELIGIBLE implies HELDACTIVE, so we only need to * check for HELDACTIVE to cover both. 
*/ ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || (res != qman_cb_dqrr_park)); if (res == qman_cb_dqrr_park) /* The only thing to do for non-DCA is the park-request */ qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal); qm_dqrr_next(p_QmPortal->p_LowQmPortal); qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1); } } /* Portal interrupt handler */ static void portal_isr(void *ptr) { t_QmPortal *p_QmPortal = ptr; uint32_t event = 0; uint32_t enableEvents = qm_isr_enable_read(p_QmPortal->p_LowQmPortal); DBG(TRACE, ("software-portal %d got interrupt", p_QmPortal->p_LowQmPortal->config.cpu)); event |= (qm_isr_status_read(p_QmPortal->p_LowQmPortal) & enableEvents); qm_isr_status_clear(p_QmPortal->p_LowQmPortal, event); /* Only do fast-path handling if it's required */ if (/*(event & QM_PIRQ_DQRI) &&*/ (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_FAST)) p_QmPortal->f_LoopDequeueRingCB(p_QmPortal); if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_SLOW) LoopMessageRing(p_QmPortal, event); } static t_Error qman_query_fq_np(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, struct qm_mcr_queryfq_np *p_Np) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; uint8_t res; NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->queryfq_np.fqid = p_Fq->fqid; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); res = p_Mcr->result; if (res == QM_MCR_RESULT_OK) *p_Np = p_Mcr->queryfq_np; PUNLOCK(p_QmPortal); if (res != QM_MCR_RESULT_OK) RETURN_ERROR(MINOR, E_INVALID_STATE, ("QUERYFQ_NP failed: %s\n", mcr_result_str(res))); return E_OK; } static uint8_t QmCgGetCgId(t_Handle h_QmCg) { t_QmCg *p_QmCg = (t_QmCg *)h_QmCg; return p_QmCg->id; } static t_Error qm_new_fq(t_QmPortal *p_QmPortal, uint32_t fqid, uint32_t fqidOffset, uint32_t channel, uint32_t wqid, uint16_t count, uint32_t flags, t_QmFqrCongestionAvoidanceParams *p_CgParams, t_QmContextA *p_ContextA, t_QmContextB *p_ContextB, bool initParked, t_Handle h_QmFqr, struct qman_fq **p_Fqs) { struct qman_fq *p_Fq = NULL; struct qm_mcc_initfq fq_opts; uint32_t i; t_Error err = E_OK; int gap, tmp; uint32_t tmpA, tmpN, ta=0, tn=0, initFqFlag; ASSERT_COND(p_QmPortal); ASSERT_COND(count); for(i=0;icb.dqrr = p_QmPortal->f_DfltFrame; p_Fq->cb.ern = p_QmPortal->f_RejectedFrame; p_Fq->cb.dc_ern = cb_ern_dcErn; p_Fq->cb.fqs = cb_fqs; p_Fq->h_App = p_QmPortal->h_App; p_Fq->h_QmFqr = h_QmFqr; p_Fq->fqidOffset = fqidOffset; p_Fqs[i] = p_Fq; if ((err = qman_create_fq(p_QmPortal,(uint32_t)(fqid + i), 0, p_Fqs[i])) != E_OK) break; } if (err != E_OK) { for(i=0;ih_QmCg); /* CG OAC and FQ TD may not be configured at the same time. if both are required, than we configure CG first, and the FQ TD later - see below. 
*/ fq_opts.fqd.cgid = QmCgGetCgId(p_CgParams->h_QmCg); fq_opts.we_mask |= QM_INITFQ_WE_CGID; if(p_CgParams->overheadAccountingLength) { fq_opts.we_mask |= QM_INITFQ_WE_OAC; fq_opts.we_mask &= ~QM_INITFQ_WE_TDTHRESH; fq_opts.fqd.td_thresh = (uint16_t)(QM_FQD_TD_THRESH_OAC_EN | p_CgParams->overheadAccountingLength); } } if((flags & QM_FQCTRL_TDE) && (!p_CgParams->overheadAccountingLength)) { ASSERT_COND(p_CgParams->fqTailDropThreshold); fq_opts.we_mask |= QM_INITFQ_WE_TDTHRESH; /* express thresh as ta*2^tn */ gap = (int)p_CgParams->fqTailDropThreshold; for (tmpA=0 ; tmpA<256; tmpA++ ) for (tmpN=0 ; tmpN<32; tmpN++ ) { tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<overheadAccountingLength)) initFqFlag = 0; else initFqFlag = (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED); if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], initFqFlag, &fq_opts)) != E_OK) { for(i=0;ioverheadAccountingLength)) { ASSERT_COND(p_CgParams->fqTailDropThreshold); fq_opts.we_mask = QM_INITFQ_WE_TDTHRESH; /* express thresh as ta*2^tn */ gap = (int)p_CgParams->fqTailDropThreshold; for (tmpA=0 ; tmpA<256; tmpA++ ) for (tmpN=0 ; tmpN<32; tmpN++ ) { tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<fqid += i; } return err; } static t_Error qm_free_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq) { uint32_t flags=0; if (qman_retire_fq(p_QmPortal, p_Fq, &flags, false) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!")); if (flags & QMAN_FQ_STATE_CHANGING) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("fq %d currently in use, will be retired", p_Fq->fqid)); if (flags & QMAN_FQ_STATE_NE) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed;" \ "Frame Queue Not Empty, Need to dequeue")); if (qman_oos_fq(p_QmPortal, p_Fq) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!")); qman_destroy_fq(p_Fq,0); return E_OK; } static void qman_disable_portal(t_QmPortal *p_QmPortal) { NCSW_PLOCK(p_QmPortal); if (!(p_QmPortal->disable_count++)) qm_dqrr_set_maxfill(p_QmPortal->p_LowQmPortal, 0); PUNLOCK(p_QmPortal); } /* quiesce SDQCR/VDQCR, then drain till h/w wraps up anything it * was doing (5ms is more than enough to ensure it's done). */ static void clean_dqrr_mr(t_QmPortal *p_QmPortal) { struct qm_dqrr_entry *p_Dq; struct qm_mr_entry *p_Msg; int idle = 0; qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, 0); qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, 0); drain_loop: qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal); qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal); if (p_Dq) { qm_dqrr_next(p_QmPortal->p_LowQmPortal); qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1); } if (p_Msg) { qm_mr_next(p_QmPortal->p_LowQmPortal); qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1); } if (!p_Dq && !p_Msg) { if (++idle < 5) { XX_UDelay(1000); goto drain_loop; } } else { idle = 0; goto drain_loop; } } static t_Error qman_create_portal(t_QmPortal *p_QmPortal, uint32_t flags, uint32_t sdqcrFlags, uint8_t dqrrSize) { const struct qm_portal_config *p_Config = &(p_QmPortal->p_LowQmPortal->config); int ret = 0; t_Error err; uint32_t isdr; if ((err = qm_eqcr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalEqcrCCE)) != E_OK) RETURN_ERROR(MINOR, err, ("Qman EQCR initialization failed\n")); if (qm_dqrr_init(p_QmPortal->p_LowQmPortal, sdqcrFlags ? e_QmPortalDequeuePushMode : e_QmPortalDequeuePullMode, e_QmPortalPVB, (flags & QMAN_PORTAL_FLAG_DCA) ? 
e_QmPortalDqrrDCA : e_QmPortalDqrrCCI, dqrrSize, (flags & QMAN_PORTAL_FLAG_RSTASH) ? 1 : 0, (flags & QMAN_PORTAL_FLAG_DSTASH) ? 1 : 0)) { REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR initialization failed")); goto fail_dqrr; } if (qm_mr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalMrCCI)) { REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR initialization failed")); goto fail_mr; } if (qm_mc_init(p_QmPortal->p_LowQmPortal)) { REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MC initialization failed")); goto fail_mc; } if (qm_isr_init(p_QmPortal->p_LowQmPortal)) { REPORT_ERROR(MAJOR, E_INVALID_STATE, ("ISR initialization failed")); goto fail_isr; } /* static interrupt-gating controls */ qm_dqrr_set_ithresh(p_QmPortal->p_LowQmPortal, 12); qm_mr_set_ithresh(p_QmPortal->p_LowQmPortal, 4); qm_isr_set_iperiod(p_QmPortal->p_LowQmPortal, 100); p_QmPortal->options = flags; isdr = 0xffffffff; qm_isr_status_clear(p_QmPortal->p_LowQmPortal, 0xffffffff); qm_isr_enable_write(p_QmPortal->p_LowQmPortal, DEFAULT_portalExceptions); qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr); if (flags & QMAN_PORTAL_FLAG_IRQ) { XX_SetIntr(p_Config->irq, portal_isr, p_QmPortal); XX_EnableIntr(p_Config->irq); qm_isr_uninhibit(p_QmPortal->p_LowQmPortal); } else /* without IRQ, we can't block */ flags &= ~QMAN_PORTAL_FLAG_WAIT; /* Need EQCR to be empty before continuing */ isdr ^= QM_PIRQ_EQCI; qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr); ret = qm_eqcr_get_fill(p_QmPortal->p_LowQmPortal); if (ret) { REPORT_ERROR(MAJOR, E_INVALID_STATE, ("EQCR unclean")); goto fail_eqcr_empty; } isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI); qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr); if (qm_dqrr_current(p_QmPortal->p_LowQmPortal) != NULL) { REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR unclean")); goto fail_dqrr_mr_empty; } if (qm_mr_current(p_QmPortal->p_LowQmPortal) != NULL) { REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR unclean")); goto fail_dqrr_mr_empty; } qm_isr_disable_write(p_QmPortal->p_LowQmPortal, 0); qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags); return E_OK; fail_dqrr_mr_empty: fail_eqcr_empty: qm_isr_finish(p_QmPortal->p_LowQmPortal); fail_isr: qm_mc_finish(p_QmPortal->p_LowQmPortal); fail_mc: qm_mr_finish(p_QmPortal->p_LowQmPortal); fail_mr: qm_dqrr_finish(p_QmPortal->p_LowQmPortal); fail_dqrr: qm_eqcr_finish(p_QmPortal->p_LowQmPortal); return ERROR_CODE(E_INVALID_STATE); } static void qman_destroy_portal(t_QmPortal *p_QmPortal) { /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or * something related to QM_PIRQ_EQCI, this may need fixing. 
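 * (Quiescing here means letting hardware catch up on any enqueue commands
 * already committed to the EQCR before the rings are torn down;
 * schematically, using helpers that appear in this file:
 *
 *     while (qm_eqcr_get_fill(p_QmPortal->p_LowQmPortal) != 0)
 *         qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
 *
 * an illustrative sketch only, not the driver's exact teardown sequence.)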
*/ qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal); if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ) { XX_DisableIntr(p_QmPortal->p_LowQmPortal->config.irq); XX_FreeIntr(p_QmPortal->p_LowQmPortal->config.irq); } qm_isr_finish(p_QmPortal->p_LowQmPortal); qm_mc_finish(p_QmPortal->p_LowQmPortal); qm_mr_finish(p_QmPortal->p_LowQmPortal); qm_dqrr_finish(p_QmPortal->p_LowQmPortal); qm_eqcr_finish(p_QmPortal->p_LowQmPortal); } static inline struct qm_eqcr_entry *try_eq_start(t_QmPortal *p_QmPortal) { struct qm_eqcr_entry *p_Eq; uint8_t avail; avail = qm_eqcr_get_avail(p_QmPortal->p_LowQmPortal); if (avail == EQCR_THRESH) qmPortalEqcrCcePrefetch(p_QmPortal->p_LowQmPortal); else if (avail < EQCR_THRESH) qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal); p_Eq = qm_eqcr_start(p_QmPortal->p_LowQmPortal); return p_Eq; } static t_Error qman_orp_update(t_QmPortal *p_QmPortal, uint32_t orpId, uint16_t orpSeqnum, uint32_t flags) { struct qm_eqcr_entry *p_Eq; NCSW_PLOCK(p_QmPortal); p_Eq = try_eq_start(p_QmPortal); if (!p_Eq) { PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } if (flags & QMAN_ENQUEUE_FLAG_NESN) orpSeqnum |= QM_EQCR_SEQNUM_NESN; else /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */ orpSeqnum &= ~QM_EQCR_SEQNUM_NESN; p_Eq->seqnum = orpSeqnum; p_Eq->orp = orpId; qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal, (uint8_t)QM_EQCR_VERB_ORP); PUNLOCK(p_QmPortal); return E_OK; } static __inline__ t_Error CheckStashParams(t_QmFqrParams *p_QmFqrParams) { ASSERT_COND(p_QmFqrParams); if (p_QmFqrParams->stashingParams.frameAnnotationSize > QM_CONTEXTA_MAX_STASH_SIZE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Annotation Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE)); if (p_QmFqrParams->stashingParams.frameDataSize > QM_CONTEXTA_MAX_STASH_SIZE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Data Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE)); if (p_QmFqrParams->stashingParams.fqContextSize > QM_CONTEXTA_MAX_STASH_SIZE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Context Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE)); if (p_QmFqrParams->stashingParams.fqContextSize) { if (!p_QmFqrParams->stashingParams.fqContextAddr) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be givven")); if (!IS_ALIGNED(p_QmFqrParams->stashingParams.fqContextAddr, CACHELINE_SIZE)) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be aligned to %d", CACHELINE_SIZE)); if (p_QmFqrParams->stashingParams.fqContextAddr & 0xffffff0000000000LL) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address May be up to 40 bit")); } return E_OK; } static t_Error QmPortalRegisterCg(t_Handle h_QmPortal, t_Handle h_QmCg, uint8_t cgId) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; /* cgrs[0] is the mask of registered CG's*/ if(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32))) RETURN_ERROR(MINOR, E_BUSY, ("CG already used")); p_QmPortal->cgrs[0].q.__state[cgId/32] |= 0x80000000 >> (cgId % 32); p_QmPortal->cgsHandles[cgId] = h_QmCg; return E_OK; } static t_Error QmPortalUnregisterCg(t_Handle h_QmPortal, uint8_t cgId) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; /* cgrs[0] is the mask of registered CG's*/ if(!(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32)))) RETURN_ERROR(MINOR, E_BUSY, ("CG is not in use")); p_QmPortal->cgrs[0].q.__state[cgId/32] &= ~0x80000000 >> (cgId % 32); p_QmPortal->cgsHandles[cgId] = NULL; return E_OK; } static e_DpaaSwPortal QmPortalGetSwPortalId(t_Handle h_QmPortal) { t_QmPortal *p_QmPortal = 
(t_QmPortal *)h_QmPortal;
    return (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu;
}

static t_Error CalcWredCurve(t_QmCgWredCurve *p_WredCurve, uint32_t *p_CurveWord)
{
    uint32_t maxP, roundDown, roundUp, tmpA, tmpN;
    uint32_t ma=0, mn=0, slope, sa=0, sn=0, pn;
    int pres = 1000;
    int gap, tmp;

    /* TODO - change maxTh to uint64_t?
       if(p_WredCurve->maxTh > (1<<39))
           RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh is not in range"));*/

    /* express maxTh as ma*2^mn */
    gap = (int)p_WredCurve->maxTh;
    for (tmpA = 0; tmpA < 256; tmpA++)
        for (tmpN = 0; tmpN < 32; tmpN++)
        {
            tmp = ABS((int)(p_WredCurve->maxTh - tmpA*(1<<tmpN)));
            if (tmp < gap)
            {
                ma = tmpA;
                mn = tmpN;
                gap = tmp;
            }
        }
    p_WredCurve->maxTh = ma*(1<<mn);

    if (p_WredCurve->maxTh <= p_WredCurve->minTh)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh must be larger than minTh"));
    if (p_WredCurve->probabilityDenominator > 64)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("probabilityDenominator must be in the range 1-64"));

    /* first we translate from the Cisco probabilityDenominator to a fixed
       denominator of 256; the result must be divisible by 4. */
    /* we multiply by a fixed value to get better accuracy (without using
       floating point) */
    maxP = (uint32_t)(256*1000/p_WredCurve->probabilityDenominator);
    if (maxP % (4*pres))
    {
        roundDown = maxP - (maxP % (4*pres));
        roundUp = roundDown + 4*pres;
        if ((roundUp - maxP) > (maxP - roundDown))
            maxP = roundDown;
        else
            maxP = roundUp;
    }
    maxP = maxP/pres;
    ASSERT_COND(maxP <= 256);
    pn = (uint8_t)(maxP/4 - 1);

    if (maxP >= (p_WredCurve->maxTh - p_WredCurve->minTh))
        RETURN_ERROR(MINOR, E_INVALID_VALUE,
                     ("Due to probabilityDenominator selected, maxTh-minTh must be larger than %d", maxP));

    pres = 1000000;
    slope = maxP*pres/(p_WredCurve->maxTh - p_WredCurve->minTh);

    /* express slope as sa/2^sn */
    gap = (int)slope;
    for (tmpA = (uint32_t)(64*pres); tmpA < 128*pres; tmpA += pres)
        for (tmpN = 7; tmpN < 64; tmpN++)
        {
            tmp = ABS((int)(slope - tmpA/(1<<tmpN)));
            if (tmp < gap)
            {
                sa = tmpA;
                sn = tmpN;
                gap = tmp;
            }
        }
    sa = sa/pres;
    ASSERT_COND(sa<128 && sa>=64);
    ASSERT_COND(sn<64 && sn>=7);

    *p_CurveWord = ((ma << 24) |
                    (mn << 19) |
                    (sa << 12) |
                    (sn << 6)  |
                    pn);

    return E_OK;
}

static t_Error QmPortalPullFrame(t_Handle h_QmPortal, uint32_t pdqcr, t_DpaaFD *p_Frame)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
    struct qm_dqrr_entry *p_Dq;
    struct qman_fq *p_Fq;
    int prefetch;
    uint32_t *p_Dst, *p_Src;

    ASSERT_COND(p_QmPortal);
    ASSERT_COND(p_Frame);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->pullMode, E_INVALID_STATE);

    NCSW_PLOCK(p_QmPortal);
    qm_dqrr_pdqcr_set(p_QmPortal->p_LowQmPortal, pdqcr);
    mb();
    while (qm_dqrr_pdqcr_get(p_QmPortal->p_LowQmPortal)) ;

    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
    while (TRUE)
    {
        if (prefetch)
            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            continue;
        p_Fq = ptr_from_aligned_int(p_Dq->contextB);
        ASSERT_COND(p_Dq->fqid);
        p_Dst = (uint32_t *)p_Frame;
        p_Src = (uint32_t *)&p_Dq->fd;
        p_Dst[0] = p_Src[0];
        p_Dst[1] = p_Src[1];
        p_Dst[2] = p_Src[2];
        p_Dst[3] = p_Src[3];
        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA)
        {
            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                       p_Dq,
                                       false);
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        }
        else
        {
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
        }
        break;
    }
    PUNLOCK(p_QmPortal);

    if (!(p_Dq->stat & QM_DQRR_STAT_FD_VALID))
        return ERROR_CODE(E_EMPTY);

    return E_OK;
}

/****************************************/
/*       API Init unit functions        */
/****************************************/
t_Handle QM_PORTAL_Config(t_QmPortalParam *p_QmPortalParam)
{
    t_QmPortal *p_QmPortal;
    uint32_t i;

    SANITY_CHECK_RETURN_VALUE(p_QmPortalParam, E_INVALID_HANDLE, NULL);
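    /* (A note on the brute-force searches in CalcWredCurve() above, and on
     * the cs_thres/td_thresh setup elsewhere in this file: QMan stores
     * thresholds as an 8-bit mantissa scaled by a 5-bit power-of-two
     * exponent, so the driver searches for the (mantissa, exponent) pair
     * whose value ta*2^tn lands closest to the requested threshold. A
     * self-contained sketch of the same idea, as a hypothetical helper
     * that is not part of this driver:
     *
     *     static void EncodeThresh(uint32_t thresh, uint32_t *p_Ta, uint32_t *p_Tn)
     *     {
     *         uint32_t a, n, ta = 0, tn = 0;
     *         uint32_t gap = thresh;
     *         for (a = 0; a < 256; a++)          (8-bit mantissa)
     *             for (n = 0; n < 32; n++) {     (power-of-two exponent)
     *                 uint32_t v = a * (1U << n);
     *                 uint32_t tmp = (v > thresh) ? (v - thresh) : (thresh - v);
     *                 if (tmp < gap) {
     *                     ta = a; tn = n; gap = tmp;
     *                 }
     *             }
     *         *p_Ta = ta;    (thresh is approximated by ta * 2^tn)
     *         *p_Tn = tn;
     *     }
     * ) */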
SANITY_CHECK_RETURN_VALUE(p_QmPortalParam->swPortalId < DPAA_MAX_NUM_OF_SW_PORTALS, E_INVALID_VALUE, 0); p_QmPortal = (t_QmPortal *)XX_Malloc(sizeof(t_QmPortal)); if (!p_QmPortal) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal obj!!!")); return NULL; } memset(p_QmPortal, 0, sizeof(t_QmPortal)); p_QmPortal->p_LowQmPortal = (struct qm_portal *)XX_Malloc(sizeof(struct qm_portal)); if (!p_QmPortal->p_LowQmPortal) { XX_Free(p_QmPortal); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Low qm p_QmPortal obj!!!")); return NULL; } memset(p_QmPortal->p_LowQmPortal, 0, sizeof(struct qm_portal)); p_QmPortal->p_QmPortalDriverParams = (t_QmPortalDriverParams *)XX_Malloc(sizeof(t_QmPortalDriverParams)); if (!p_QmPortal->p_QmPortalDriverParams) { XX_Free(p_QmPortal->p_LowQmPortal); XX_Free(p_QmPortal); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal driver parameters")); return NULL; } memset(p_QmPortal->p_QmPortalDriverParams, 0, sizeof(t_QmPortalDriverParams)); p_QmPortal->p_LowQmPortal->addr.addr_ce = UINT_TO_PTR(p_QmPortalParam->ceBaseAddress); p_QmPortal->p_LowQmPortal->addr.addr_ci = UINT_TO_PTR(p_QmPortalParam->ciBaseAddress); p_QmPortal->p_LowQmPortal->config.irq = p_QmPortalParam->irq; p_QmPortal->p_LowQmPortal->config.bound = 0; p_QmPortal->p_LowQmPortal->config.cpu = (int)p_QmPortalParam->swPortalId; p_QmPortal->p_LowQmPortal->config.channel = (e_QmFQChannel)(e_QM_FQ_CHANNEL_SWPORTAL0 + p_QmPortalParam->swPortalId); p_QmPortal->p_LowQmPortal->bind_lock = XX_InitSpinlock(); p_QmPortal->h_Qm = p_QmPortalParam->h_Qm; p_QmPortal->f_DfltFrame = p_QmPortalParam->f_DfltFrame; p_QmPortal->f_RejectedFrame = p_QmPortalParam->f_RejectedFrame; p_QmPortal->h_App = p_QmPortalParam->h_App; p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset = p_QmPortalParam->fdLiodnOffset; p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode = DEFAULT_dequeueDcaMode; p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames = DEFAULT_dequeueUpToThreeFrames; p_QmPortal->p_QmPortalDriverParams->commandType = DEFAULT_dequeueCommandType; p_QmPortal->p_QmPortalDriverParams->userToken = DEFAULT_dequeueUserToken; p_QmPortal->p_QmPortalDriverParams->specifiedWq = DEFAULT_dequeueSpecifiedWq; p_QmPortal->p_QmPortalDriverParams->dedicatedChannel = DEFAULT_dequeueDedicatedChannel; p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels = DEFAULT_dequeueDedicatedChannelHasPrecedenceOverPoolChannels; p_QmPortal->p_QmPortalDriverParams->poolChannelId = DEFAULT_dequeuePoolChannelId; p_QmPortal->p_QmPortalDriverParams->wqId = DEFAULT_dequeueWqId; for (i=0;ip_QmPortalDriverParams->poolChannels[i] = FALSE; p_QmPortal->p_QmPortalDriverParams->dqrrSize = DEFAULT_dqrrSize; p_QmPortal->p_QmPortalDriverParams->pullMode = DEFAULT_pullMode; return p_QmPortal; } t_Error QM_PORTAL_Init(t_Handle h_QmPortal) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; uint32_t i, flags=0, sdqcrFlags=0; t_Error err; t_QmInterModulePortalInitParams qmParams; SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE); memset(&qmParams, 0, sizeof(qmParams)); qmParams.portalId = (uint8_t)p_QmPortal->p_LowQmPortal->config.cpu; qmParams.liodn = p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset; qmParams.dqrrLiodn = p_QmPortal->p_QmPortalDriverParams->dqrrLiodn; qmParams.fdFqLiodn = p_QmPortal->p_QmPortalDriverParams->fdFqLiodn; qmParams.stashDestQueue = p_QmPortal->p_QmPortalDriverParams->stashDestQueue; if ((err = QmGetSetPortalParams(p_QmPortal->h_Qm, &qmParams)) != E_OK) 
RETURN_ERROR(MAJOR, err, NO_MSG); flags = (uint32_t)(((p_QmPortal->p_LowQmPortal->config.irq == NO_IRQ) ? 0 : (QMAN_PORTAL_FLAG_IRQ | QMAN_PORTAL_FLAG_IRQ_FAST | QMAN_PORTAL_FLAG_IRQ_SLOW))); flags |= ((p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode) ? QMAN_PORTAL_FLAG_DCA : 0); flags |= (p_QmPortal->p_QmPortalDriverParams->dqrr)?QMAN_PORTAL_FLAG_RSTASH:0; flags |= (p_QmPortal->p_QmPortalDriverParams->fdFq)?QMAN_PORTAL_FLAG_DSTASH:0; p_QmPortal->pullMode = p_QmPortal->p_QmPortalDriverParams->pullMode; if (!p_QmPortal->pullMode) { sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames) ? QM_SDQCR_COUNT_UPTO3 : QM_SDQCR_COUNT_EXACT1; sdqcrFlags |= QM_SDQCR_TOKEN_SET(p_QmPortal->p_QmPortalDriverParams->userToken); sdqcrFlags |= QM_SDQCR_TYPE_SET(p_QmPortal->p_QmPortalDriverParams->commandType); if (!p_QmPortal->p_QmPortalDriverParams->specifiedWq) { /* sdqcrFlags |= QM_SDQCR_SOURCE_CHANNELS;*/ /* removed as the macro is '0' */ sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels) ? QM_SDQCR_DEDICATED_PRECEDENCE : 0; sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ? QM_SDQCR_CHANNELS_DEDICATED : 0; for (i=0;ip_QmPortalDriverParams->poolChannels[i]) ? QM_SDQCR_CHANNELS_POOL(i+1) : 0); } else { sdqcrFlags |= QM_SDQCR_SOURCE_SPECIFICWQ; sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ? QM_SDQCR_SPECIFICWQ_DEDICATED : QM_SDQCR_SPECIFICWQ_POOL(p_QmPortal->p_QmPortalDriverParams->poolChannelId); sdqcrFlags |= QM_SDQCR_SPECIFICWQ_WQ(p_QmPortal->p_QmPortalDriverParams->wqId); } } if ((flags & QMAN_PORTAL_FLAG_RSTASH) && (flags & QMAN_PORTAL_FLAG_DCA)) p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingDcaOptimized; else if ((flags & QMAN_PORTAL_FLAG_RSTASH) && !(flags & QMAN_PORTAL_FLAG_DCA)) p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingOptimized; else p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRing; if ((!p_QmPortal->f_RejectedFrame) || (!p_QmPortal->f_DfltFrame)) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_RejectedFrame or f_DfltFrame callback not provided")); p_QmPortal->p_NullCB = (struct qman_fq_cb *)XX_Malloc(sizeof(struct qman_fq_cb)); if (!p_QmPortal->p_NullCB) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ Null CB obj!!!")); memset(p_QmPortal->p_NullCB, 0, sizeof(struct qman_fq_cb)); p_QmPortal->p_NullCB->dqrr = p_QmPortal->f_DfltFrame; p_QmPortal->p_NullCB->ern = p_QmPortal->f_RejectedFrame; p_QmPortal->p_NullCB->dc_ern = p_QmPortal->p_NullCB->fqs = null_cb_mr; if (qman_create_portal(p_QmPortal, flags, sdqcrFlags, p_QmPortal->p_QmPortalDriverParams->dqrrSize) != E_OK) { RETURN_ERROR(MAJOR, E_NO_MEMORY, ("create portal failed")); } QmSetPortalHandle(p_QmPortal->h_Qm, (t_Handle)p_QmPortal, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu); XX_Free(p_QmPortal->p_QmPortalDriverParams); p_QmPortal->p_QmPortalDriverParams = NULL; DBG(TRACE, ("Qman-Portal %d @ %p:%p", p_QmPortal->p_LowQmPortal->config.cpu, p_QmPortal->p_LowQmPortal->addr.addr_ce, p_QmPortal->p_LowQmPortal->addr.addr_ci )); DBG(TRACE, ("Qman-Portal %d phys @ 0x%016llx:0x%016llx", p_QmPortal->p_LowQmPortal->config.cpu, (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ce), (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ci) )); return E_OK; } t_Error QM_PORTAL_Free(t_Handle h_QmPortal) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; if (!p_QmPortal) return ERROR_CODE(E_INVALID_HANDLE); ASSERT_COND(p_QmPortal->p_LowQmPortal); QmSetPortalHandle(p_QmPortal->h_Qm, NULL, 
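/* (A note on the SDQCR assembly in QM_PORTAL_Init() above: in push mode the
 * static dequeue command register names the dequeue sources once, either
 * whole channels (dedicated and/or pools) or one specific work queue.
 * Schematically, a portal dequeuing up to three frames at a time from its
 * dedicated channel plus pool channel 1 would set:
 *
 *     sdqcrFlags = QM_SDQCR_COUNT_UPTO3 |
 *                  QM_SDQCR_CHANNELS_DEDICATED |
 *                  QM_SDQCR_CHANNELS_POOL(1);
 *
 * an illustrative combination of the macros used above, not a required
 * configuration.) */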
(e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu); qman_destroy_portal(p_QmPortal); if (p_QmPortal->p_NullCB) XX_Free(p_QmPortal->p_NullCB); if (p_QmPortal->p_LowQmPortal->bind_lock) XX_FreeSpinlock(p_QmPortal->p_LowQmPortal->bind_lock); if(p_QmPortal->p_QmPortalDriverParams) XX_Free(p_QmPortal->p_QmPortalDriverParams); XX_Free(p_QmPortal->p_LowQmPortal); XX_Free(p_QmPortal); return E_OK; } t_Error QM_PORTAL_ConfigDcaMode(t_Handle h_QmPortal, bool enable) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE); p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode = enable; return E_OK; } t_Error QM_PORTAL_ConfigStash(t_Handle h_QmPortal, t_QmPortalStashParam *p_StashParams) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_StashParams, E_NULL_POINTER); p_QmPortal->p_QmPortalDriverParams->stashDestQueue = p_StashParams->stashDestQueue; p_QmPortal->p_QmPortalDriverParams->dqrrLiodn = p_StashParams->dqrrLiodn; p_QmPortal->p_QmPortalDriverParams->fdFqLiodn = p_StashParams->fdFqLiodn; p_QmPortal->p_QmPortalDriverParams->eqcr = p_StashParams->eqcr; p_QmPortal->p_QmPortalDriverParams->eqcrHighPri = p_StashParams->eqcrHighPri; p_QmPortal->p_QmPortalDriverParams->dqrr = p_StashParams->dqrr; p_QmPortal->p_QmPortalDriverParams->dqrrHighPri = p_StashParams->dqrrHighPri; p_QmPortal->p_QmPortalDriverParams->fdFq = p_StashParams->fdFq; p_QmPortal->p_QmPortalDriverParams->fdFqHighPri = p_StashParams->fdFqHighPri; p_QmPortal->p_QmPortalDriverParams->fdFqDrop = p_StashParams->fdFqDrop; return E_OK; } t_Error QM_PORTAL_ConfigPullMode(t_Handle h_QmPortal, bool pullMode) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER); p_QmPortal->p_QmPortalDriverParams->pullMode = pullMode; return E_OK; } t_Error QM_PORTAL_AddPoolChannel(t_Handle h_QmPortal, uint8_t poolChannelId) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; uint32_t sdqcrFlags; SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR((poolChannelId < QM_MAX_NUM_OF_POOL_CHANNELS), E_INVALID_VALUE); sdqcrFlags = qm_dqrr_sdqcr_get(p_QmPortal->p_LowQmPortal); sdqcrFlags |= QM_SDQCR_CHANNELS_POOL(poolChannelId+1); qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags); return E_OK; } t_Error QM_PORTAL_Poll(t_Handle h_QmPortal, e_QmPortalPollSource source) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE); NCSW_PLOCK(p_QmPortal); if ((source == e_QM_PORTAL_POLL_SOURCE_CONTROL_FRAMES) || (source == e_QM_PORTAL_POLL_SOURCE_BOTH)) { uint32_t is = qm_isr_status_read(p_QmPortal->p_LowQmPortal); uint32_t active = LoopMessageRing(p_QmPortal, is); if (active) qm_isr_status_clear(p_QmPortal->p_LowQmPortal, active); } if ((source == e_QM_PORTAL_POLL_SOURCE_DATA_FRAMES) || (source == e_QM_PORTAL_POLL_SOURCE_BOTH)) p_QmPortal->f_LoopDequeueRingCB((t_Handle)p_QmPortal); PUNLOCK(p_QmPortal); return E_OK; } t_Error QM_PORTAL_PollFrame(t_Handle h_QmPortal, t_QmPortalFrameInfo *p_frameInfo) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; struct qm_dqrr_entry *p_Dq; struct qman_fq *p_Fq; int prefetch; SANITY_CHECK_RETURN_ERROR(p_QmPortal, 
E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_frameInfo, E_NULL_POINTER); NCSW_PLOCK(p_QmPortal); prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH); if (prefetch) qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal); qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); if (!p_Dq) { PUNLOCK(p_QmPortal); return ERROR_CODE(E_EMPTY); } p_Fq = ptr_from_aligned_int(p_Dq->contextB); ASSERT_COND(p_Dq->fqid); if (p_Fq) { p_frameInfo->h_App = p_Fq->h_App; p_frameInfo->h_QmFqr = p_Fq->h_QmFqr; p_frameInfo->fqidOffset = p_Fq->fqidOffset; memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD)); } else { p_frameInfo->h_App = p_QmPortal->h_App; p_frameInfo->h_QmFqr = NULL; p_frameInfo->fqidOffset = p_Dq->fqid; memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD)); } if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) { qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal, p_Dq, false); qm_dqrr_next(p_QmPortal->p_LowQmPortal); } else { qm_dqrr_next(p_QmPortal->p_LowQmPortal); qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1); } PUNLOCK(p_QmPortal); return E_OK; } t_Handle QM_FQR_Create(t_QmFqrParams *p_QmFqrParams) { t_QmFqr *p_QmFqr; uint32_t i, flags = 0; u_QmFqdContextA cnxtA; SANITY_CHECK_RETURN_VALUE(p_QmFqrParams, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_QmFqrParams->h_Qm, E_INVALID_HANDLE, NULL); if (p_QmFqrParams->shadowMode && (!p_QmFqrParams->useForce || p_QmFqrParams->numOfFqids != 1)) { REPORT_ERROR(MAJOR, E_CONFLICT, ("shadowMode must be use with useForce and numOfFqids==1!!!")); return NULL; } p_QmFqr = (t_QmFqr *)XX_MallocSmart(sizeof(t_QmFqr), 0, 64); if (!p_QmFqr) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQR obj!!!")); return NULL; } memset(p_QmFqr, 0, sizeof(t_QmFqr)); p_QmFqr->h_Qm = p_QmFqrParams->h_Qm; p_QmFqr->h_QmPortal = p_QmFqrParams->h_QmPortal; p_QmFqr->shadowMode = p_QmFqrParams->shadowMode; p_QmFqr->numOfFqids = (p_QmFqrParams->useForce && !p_QmFqrParams->numOfFqids) ? 
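/* (Shadow mode wraps a single pre-existing FQID without touching the
 * hardware FQD, hence the useForce/numOfFqids==1 restriction checked
 * above. A hypothetical usage sketch, not taken from in-tree code:
 *
 *     t_QmFqrParams fqrParams;
 *     memset(&fqrParams, 0, sizeof(fqrParams));
 *     fqrParams.h_Qm         = h_Qm;
 *     fqrParams.shadowMode   = TRUE;
 *     fqrParams.useForce     = TRUE;
 *     fqrParams.numOfFqids   = 1;
 *     fqrParams.qs.frcQ.fqid = existingFqid;
 *     h_Fqr = QM_FQR_Create(&fqrParams);
 * ) */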
1 : p_QmFqrParams->numOfFqids; if (!p_QmFqr->h_QmPortal) { p_QmFqr->h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_QmPortal, E_INVALID_HANDLE, NULL); } p_QmFqr->p_Fqs = (struct qman_fq **)XX_Malloc(sizeof(struct qman_fq *) * p_QmFqr->numOfFqids); if (!p_QmFqr->p_Fqs) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQs obj!!!")); QM_FQR_Free(p_QmFqr); return NULL; } memset(p_QmFqr->p_Fqs, 0, sizeof(struct qman_fq *) * p_QmFqr->numOfFqids); if (p_QmFqr->shadowMode) { struct qman_fq *p_Fq = NULL; p_QmFqr->fqidBase = p_QmFqrParams->qs.frcQ.fqid; p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64); if (!p_Fq) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!")); QM_FQR_Free(p_QmFqr); return NULL; } memset(p_Fq, 0, sizeof(struct qman_fq)); p_Fq->cb.dqrr = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_DfltFrame; p_Fq->cb.ern = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_RejectedFrame; p_Fq->cb.dc_ern = cb_ern_dcErn; p_Fq->cb.fqs = cb_fqs; p_Fq->h_App = ((t_QmPortal*)p_QmFqr->h_QmPortal)->h_App; p_Fq->h_QmFqr = p_QmFqr; p_Fq->state = qman_fq_state_sched; p_Fq->fqid = p_QmFqr->fqidBase; p_QmFqr->p_Fqs[0] = p_Fq; } else { p_QmFqr->channel = p_QmFqrParams->channel; p_QmFqr->workQueue = p_QmFqrParams->wq; p_QmFqr->fqidBase = QmFqidGet(p_QmFqr->h_Qm, p_QmFqr->numOfFqids, p_QmFqrParams->qs.nonFrcQs.align, p_QmFqrParams->useForce, p_QmFqrParams->qs.frcQ.fqid); if (p_QmFqr->fqidBase == (uint32_t)ILLEGAL_BASE) { REPORT_ERROR(CRITICAL,E_INVALID_STATE,("can't allocate a fqid")); QM_FQR_Free(p_QmFqr); return NULL; } if(p_QmFqrParams->congestionAvoidanceEnable && (p_QmFqrParams->congestionAvoidanceParams.h_QmCg == NULL) && (p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold == 0)) { REPORT_ERROR(CRITICAL,E_INVALID_STATE,("NULL congestion group handle and no FQ Threshold")); QM_FQR_Free(p_QmFqr); return NULL; } if(p_QmFqrParams->congestionAvoidanceEnable) { if(p_QmFqrParams->congestionAvoidanceParams.h_QmCg) flags |= QM_FQCTRL_CGE; if(p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold) flags |= QM_FQCTRL_TDE; } /* flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_ORP : 0; flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_CPCSTASH : 0; flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_FORCESFDR : 0; flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_AVOIDBLOCK : 0; */ flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_HOLDACTIVE : 0; flags |= (p_QmFqrParams->preferInCache) ? QM_FQCTRL_LOCKINCACHE : 0; if (p_QmFqrParams->useContextAForStash) { if (CheckStashParams(p_QmFqrParams) != E_OK) { REPORT_ERROR(CRITICAL,E_INVALID_STATE,NO_MSG); QM_FQR_Free(p_QmFqr); return NULL; } memset(&cnxtA, 0, sizeof(cnxtA)); cnxtA.stashing.annotation_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameAnnotationSize, CACHELINE_SIZE); cnxtA.stashing.data_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameDataSize, CACHELINE_SIZE); cnxtA.stashing.context_cl = DIV_CEIL(p_QmFqrParams->stashingParams.fqContextSize, CACHELINE_SIZE); cnxtA.context_hi = (uint8_t)((p_QmFqrParams->stashingParams.fqContextAddr >> 32) & 0xff); cnxtA.context_lo = (uint32_t)(p_QmFqrParams->stashingParams.fqContextAddr); flags |= QM_FQCTRL_CTXASTASHING; } for(i=0;inumOfFqids;i++) if (qm_new_fq(p_QmFqr->h_QmPortal, p_QmFqr->fqidBase+i, i, p_QmFqr->channel, p_QmFqr->workQueue, 1/*p_QmFqr->numOfFqids*/, flags, (p_QmFqrParams->congestionAvoidanceEnable ? &p_QmFqrParams->congestionAvoidanceParams : NULL), p_QmFqrParams->useContextAForStash ? 
(t_QmContextA *)&cnxtA : p_QmFqrParams->p_ContextA,
                          p_QmFqrParams->p_ContextB,
                          p_QmFqrParams->initParked,
                          p_QmFqr,
                          &p_QmFqr->p_Fqs[i]) != E_OK)
            {
                QM_FQR_Free(p_QmFqr);
                return NULL;
            }
    }

    return p_QmFqr;
}

t_Error QM_FQR_Free(t_Handle h_QmFqr)
{
    t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
    uint32_t i;

    if (!p_QmFqr)
        return ERROR_CODE(E_INVALID_HANDLE);

    if (p_QmFqr->p_Fqs)
    {
        for (i = 0; i < p_QmFqr->numOfFqids; i++)
            if (p_QmFqr->p_Fqs[i])
            {
                if (!p_QmFqr->shadowMode)
                    qm_free_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]);
                XX_FreeSmart(p_QmFqr->p_Fqs[i]);
            }
        XX_Free(p_QmFqr->p_Fqs);
    }

    if (!p_QmFqr->shadowMode && p_QmFqr->fqidBase)
        QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);

    XX_FreeSmart(p_QmFqr);
    return E_OK;
}

t_Error QM_FQR_FreeWDrain(t_Handle h_QmFqr,
                          t_QmFqrDrainedCompletionCB *f_CompletionCB,
                          bool deliverFrame,
                          t_QmReceivedFrameCallback *f_CallBack,
                          t_Handle h_App)
{
    t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
    uint32_t i;

    if (!p_QmFqr)
        return ERROR_CODE(E_INVALID_HANDLE);

    if (p_QmFqr->shadowMode)
        RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("QM_FQR_FreeWDrain can't be called for a shadow FQR!!! Call QM_FQR_Free instead."));

    p_QmFqr->p_DrainedFqs = (bool *)XX_Malloc(sizeof(bool) * p_QmFqr->numOfFqids);
    if (!p_QmFqr->p_DrainedFqs)
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("QM Drained-FQs obj!!! Try to free without draining."));
    memset(p_QmFqr->p_DrainedFqs, 0, sizeof(bool) * p_QmFqr->numOfFqids);

    if (f_CompletionCB)
    {
        p_QmFqr->f_CompletionCB = f_CompletionCB;
        p_QmFqr->h_App = h_App;
    }

    if (deliverFrame)
    {
        if (!f_CallBack)
        {
            REPORT_ERROR(MAJOR, E_NULL_POINTER, ("f_CallBack must be given."));
            XX_Free(p_QmFqr->p_DrainedFqs);
            return ERROR_CODE(E_NULL_POINTER);
        }
        QM_FQR_RegisterCB(p_QmFqr, f_CallBack, h_App);
    }
    else
        QM_FQR_RegisterCB(p_QmFqr, drainCB, h_App);

    for (i = 0; i < p_QmFqr->numOfFqids; i++)
    {
        if (qman_retire_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i], 0, true) != E_OK)
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));

        if (p_QmFqr->p_Fqs[i]->flags & QMAN_FQ_STATE_CHANGING)
            DBG(INFO, ("fq %d currently in use, will be retired", p_QmFqr->p_Fqs[i]->fqid));
        else
            drainRetiredFq(p_QmFqr->p_Fqs[i]);
    }

    if (!p_QmFqr->f_CompletionCB)
    {
        while (p_QmFqr->p_DrainedFqs) ;
        DBG(TRACE, ("QM-FQR with base %d completed", p_QmFqr->fqidBase));
        XX_FreeSmart(p_QmFqr->p_Fqs);
        if (p_QmFqr->fqidBase)
            QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
        XX_FreeSmart(p_QmFqr);
    }

    return E_OK;
}

t_Error QM_FQR_RegisterCB(t_Handle h_QmFqr, t_QmReceivedFrameCallback *f_CallBack, t_Handle h_App)
{
    t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
    int i;

    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);

    for (i = 0; i < p_QmFqr->numOfFqids; i++)
    {
        p_QmFqr->p_Fqs[i]->cb.dqrr = f_CallBack;
        p_QmFqr->p_Fqs[i]->h_App = h_App;
    }

    return E_OK;
}

t_Error QM_FQR_Enqueue(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
{
    t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
    t_QmPortal *p_QmPortal;
    struct qm_eqcr_entry *p_Eq;
    uint32_t *p_Dst, *p_Src;
    const struct qman_fq *p_Fq;

    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);

    if (!h_QmPortal)
    {
        SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
        h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
    }
    p_QmPortal = (t_QmPortal *)h_QmPortal;

    p_Fq = p_QmFqr->p_Fqs[fqidOffset];

#ifdef QM_CHECKING
    if (p_Fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
    if ((!(p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)) &&
        ((p_Fq->state == qman_fq_state_retired) ||
         (p_Fq->state == qman_fq_state_oos)))
        return
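/* (FQ teardown, as orchestrated by qm_free_fq() and QM_FQR_FreeWDrain()
 * above, follows the retire/oos protocol; schematically:
 *
 *     qman_retire_fq(h_QmPortal, p_Fq, &flags, false);   request retirement
 *     ...any in-flight frames are drained to the registered callback...
 *     qman_oos_fq(h_QmPortal, p_Fq);                     take FQ out of service
 *     qman_destroy_fq(p_Fq, 0);                          release software state
 *
 * an illustrative ordering; the asynchronous drain path (see
 * drainRetiredFq() above) completes the later steps once the retirement
 * notification arrives.) */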
ERROR_CODE(E_BUSY); #endif /* QM_CHECKING */ NCSW_PLOCK(p_QmPortal); p_Eq = try_eq_start(p_QmPortal); if (!p_Eq) { PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } p_Eq->fqid = p_Fq->fqid; p_Eq->tag = aligned_int_from_ptr(p_Fq); /* gcc does a dreadful job of the following; * eq->fd = *fd; * It causes the entire function to save/restore a wider range of * registers, and comes up with instruction-waste galore. This will do * until we can rework the function for better code-generation. */ p_Dst = (uint32_t *)&p_Eq->fd; p_Src = (uint32_t *)p_Frame; p_Dst[0] = p_Src[0]; p_Dst[1] = p_Src[1]; p_Dst[2] = p_Src[2]; p_Dst[3] = p_Src[3]; qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal, (uint8_t)(QM_EQCR_VERB_CMD_ENQUEUE/* | (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))*/)); PUNLOCK(p_QmPortal); return E_OK; } t_Error QM_FQR_PullFrame(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; uint32_t pdqcr = 0; SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE); SANITY_CHECK_RETURN_ERROR(p_Frame, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_oos) || (p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_parked), E_INVALID_STATE); if (!h_QmPortal) { SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE); h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE); } pdqcr |= QM_PDQCR_MODE_UNSCHEDULED; pdqcr |= QM_PDQCR_FQID(p_QmFqr->p_Fqs[fqidOffset]->fqid); return QmPortalPullFrame(h_QmPortal, pdqcr, p_Frame); } t_Error QM_FQR_Resume(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE); if (!h_QmPortal) { SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE); h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE); } return qman_schedule_fq(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset]); } t_Error QM_FQR_Suspend(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE); SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->flags & QM_FQCTRL_HOLDACTIVE), E_INVALID_STATE); UNUSED(h_QmPortal); p_QmFqr->p_Fqs[fqidOffset]->state = qman_fq_state_waiting_parked; return E_OK; } uint32_t QM_FQR_GetFqid(t_Handle h_QmFqr) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0); return p_QmFqr->fqidBase; } uint32_t QM_FQR_GetCounter(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, e_QmFqrCounters counter) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; struct qm_mcr_queryfq_np queryfq_np; SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0); SANITY_CHECK_RETURN_VALUE((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE, 0); if (!h_QmPortal) { SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_Qm, E_INVALID_HANDLE, 0); h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_VALUE(h_QmPortal, E_INVALID_HANDLE, 0); } if (qman_query_fq_np(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset], &queryfq_np) != E_OK) return 0; switch (counter) { case e_QM_FQR_COUNTERS_FRAME : return queryfq_np.frm_cnt; case e_QM_FQR_COUNTERS_BYTE : return 
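/* (Typical pull-mode use of QM_FQR_PullFrame() above, as a hypothetical
 * usage sketch:
 *
 *     t_DpaaFD fd;
 *     if (QM_FQR_PullFrame(h_Fqr, NULL, 0, &fd) == E_OK)
 *         ...process fd...            (E_EMPTY means no valid frame yet)
 *
 * passing NULL for h_QmPortal falls back to the default portal via
 * QmGetPortalHandle(), exactly as in the code above.) */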
queryfq_np.byte_cnt; default : break; } /* should never get here */ ASSERT_COND(FALSE); return 0; } t_Handle QM_CG_Create(t_QmCgParams *p_CgParams) { t_QmCg *p_QmCg; t_QmPortal *p_QmPortal; t_Error err; uint32_t wredParams; uint32_t tmpA, tmpN, ta=0, tn=0; int gap, tmp; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; SANITY_CHECK_RETURN_VALUE(p_CgParams, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_CgParams->h_Qm, E_INVALID_HANDLE, NULL); if(p_CgParams->notifyDcPortal && ((p_CgParams->dcPortalId == e_DPAA_DCPORTAL2) || (p_CgParams->dcPortalId == e_DPAA_DCPORTAL3))) { REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("notifyDcPortal is invalid for this DC Portal")); return NULL; } if (!p_CgParams->h_QmPortal) { p_QmPortal = QmGetPortalHandle(p_CgParams->h_Qm); SANITY_CHECK_RETURN_VALUE(p_QmPortal, E_INVALID_STATE, NULL); } else p_QmPortal = p_CgParams->h_QmPortal; p_QmCg = (t_QmCg *)XX_Malloc(sizeof(t_QmCg)); if (!p_QmCg) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM CG obj!!!")); return NULL; } memset(p_QmCg, 0, sizeof(t_QmCg)); /* build CG struct */ p_QmCg->h_Qm = p_CgParams->h_Qm; p_QmCg->h_QmPortal = p_QmPortal; p_QmCg->h_App = p_CgParams->h_App; err = QmGetCgId(p_CgParams->h_Qm, &p_QmCg->id); if (err) { XX_Free(p_QmCg); REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmGetCgId failed")); return NULL; } NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; err = QmPortalRegisterCg(p_QmPortal, p_QmCg, p_QmCg->id); if (err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalRegisterCg failed")); return NULL; } /* Build CGR command */ { #ifdef QM_CGS_NO_FRAME_MODE t_QmRevisionInfo revInfo; QmGetRevision(p_QmCg->h_Qm, &revInfo); if (!((revInfo.majorRev == 1) && (revInfo.minorRev == 0))) #endif /* QM_CGS_NO_FRAME_MODE */ if (p_CgParams->frameCount) { p_Mcc->initcgr.we_mask |= QM_CGR_WE_MODE; p_Mcc->initcgr.cgr.frame_mode = QM_CGR_EN; } } if (p_CgParams->wredEnable) { if (p_CgParams->wredParams.enableGreen) { err = CalcWredCurve(&p_CgParams->wredParams.greenCurve, &wredParams); if(err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MAJOR, err, NO_MSG); return NULL; } p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G; p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams; } if (p_CgParams->wredParams.enableYellow) { err = CalcWredCurve(&p_CgParams->wredParams.yellowCurve, &wredParams); if(err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MAJOR, err, NO_MSG); return NULL; } p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y; p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams; } if (p_CgParams->wredParams.enableRed) { err = CalcWredCurve(&p_CgParams->wredParams.redCurve, &wredParams); if(err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MAJOR, err, NO_MSG); return NULL; } p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R; p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams; } } if (p_CgParams->tailDropEnable) { if (!p_CgParams->threshold) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MINOR, E_INVALID_STATE, ("tailDropThreshold must be configured if tailDropEnable ")); return NULL; } p_Mcc->initcgr.cgr.cstd_en = QM_CGR_EN; p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSTD_EN; } if (p_CgParams->threshold) { p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES; p_QmCg->f_Exception = p_CgParams->f_Exception; if (p_QmCg->f_Exception || p_CgParams->notifyDcPortal) 
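/* (CSCN = congestion state change notification. The target written below
 * routes the notification either to a software portal, where it is
 * delivered through the f_Exception callback, or to a direct-connect
 * portal such as an FMan; schematically:
 *
 *     cscn_targ = QM_CGR_TARGET_SWP(swPortalId) |
 *                 QM_CGR_TARGET_DCP(dcPortalId);
 *
 * a restatement of the assignments below, where each half is set only if
 * the corresponding notification was requested.) */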
{
            p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
            p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSCN_EN | QM_CGR_WE_CSCN_TARG;
            /* if SW - set target; if HW - if FM, set HW target, otherwise
               set SW target */
            p_Mcc->initcgr.cgr.cscn_targ = 0;
            if (p_QmCg->f_Exception)
                p_Mcc->initcgr.cgr.cscn_targ = (uint32_t)QM_CGR_TARGET_SWP(QmPortalGetSwPortalId(p_QmCg->h_QmPortal));
            if (p_CgParams->notifyDcPortal)
                p_Mcc->initcgr.cgr.cscn_targ |= (uint32_t)QM_CGR_TARGET_DCP(p_CgParams->dcPortalId);
        }

        /* express thresh as ta*2^tn */
        gap = (int)p_CgParams->threshold;
        for (tmpA = 0; tmpA < 256; tmpA++)
            for (tmpN = 0; tmpN < 32; tmpN++)
            {
                tmp = ABS((int)(p_CgParams->threshold - tmpA*(1<<tmpN)));
                if (tmp < gap)
                {
                    ta = tmpA;
                    tn = tmpN;
                    gap = tmp;
                }
            }
        p_Mcc->initcgr.cgr.cs_thres.TA = ta;
        p_Mcc->initcgr.cgr.cs_thres.Tn = tn;
    }
    else if (p_CgParams->f_Exception)
    {
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        REPORT_ERROR(MINOR, E_INVALID_STATE, ("No threshold configured, but f_Exception defined"));
        return NULL;
    }

    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_INITCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_INITCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        REPORT_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
        return NULL;
    }
    PUNLOCK(p_QmPortal);

    return p_QmCg;
}

t_Error QM_CG_Free(t_Handle h_QmCg)
{
    t_QmCg *p_QmCg = (t_QmCg *)h_QmCg;
    t_Error err;
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result *p_Mcr;
    t_QmPortal *p_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);

    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;
    p_Mcc->initcgr.we_mask = QM_CGR_WE_MASK;

    err = QmFreeCgId(p_QmCg->h_Qm, p_QmCg->id);
    if (err)
    {
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmFreeCgId failed"));
    }

    err = QmPortalUnregisterCg(p_QmCg->h_QmPortal, p_QmCg->id);
    if (err)
    {
        XX_Free(p_QmCg);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalUnregisterCg failed"));
    }

    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("MODIFYCGR failed: %s", mcr_result_str(p_Mcr->result)));
    }
    PUNLOCK(p_QmPortal);

    XX_Free(p_QmCg);

    return E_OK;
}

t_Error QM_CG_SetException(t_Handle h_QmCg, e_QmExceptions exception, bool enable)
{
    t_QmCg *p_QmCg = (t_QmCg *)h_QmCg;
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result *p_Mcr;
    t_QmPortal *p_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);

    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
    if (!p_QmCg->f_Exception)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Either threshold or exception callback was not configured."));

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->initcgr.cgid = p_QmCg->id;
    p_Mcc->initcgr.we_mask = QM_CGR_WE_CSCN_EN;

    if (exception == e_QM_EX_CG_STATE_CHANGE)
    {
        if (enable)
            p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
    }
    else
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal exception"));
    }

    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
    if (p_Mcr->result != QM_MCR_RESULT_OK)
    {
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("MODIFYCGR
failed: %s", mcr_result_str(p_Mcr->result))); } PUNLOCK(p_QmPortal); return E_OK; } t_Error QM_CG_ModifyWredCurve(t_Handle h_QmCg, t_QmCgModifyWredParams *p_QmCgModifyParams) { t_QmCg *p_QmCg = (t_QmCg *)h_QmCg; uint32_t wredParams; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; t_QmPortal *p_QmPortal; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE); p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal; NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result))); } switch(p_QmCgModifyParams->color) { case(e_QM_CG_COLOR_GREEN): if(!p_Mcr->querycgr.cgr.wr_en_g) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for green")); } break; case(e_QM_CG_COLOR_YELLOW): if(!p_Mcr->querycgr.cgr.wr_en_y) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for yellow")); } break; case(e_QM_CG_COLOR_RED): if(!p_Mcr->querycgr.cgr.wr_en_r) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for red")); } break; } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; switch(p_QmCgModifyParams->color) { case(e_QM_CG_COLOR_GREEN): err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams); p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G; p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams; break; case(e_QM_CG_COLOR_YELLOW): err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams); p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y; p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams; break; case(e_QM_CG_COLOR_RED): err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams); p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R; p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams; break; } if (err) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, err, NO_MSG); } qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result))); } PUNLOCK(p_QmPortal); return E_OK; } t_Error QM_CG_ModifyTailDropThreshold(t_Handle h_QmCg, uint32_t threshold) { t_QmCg *p_QmCg = (t_QmCg *)h_QmCg; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; t_QmPortal *p_QmPortal; uint32_t tmpA, tmpN, ta=0, tn=0; int gap, tmp; SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE); p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal; NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: 
%s", mcr_result_str(p_Mcr->result))); } if(!p_Mcr->querycgr.cgr.cstd_en) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("Tail Drop is not enabled!")); } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES; /* express thresh as ta*2^tn */ gap = (int)threshold; for (tmpA=0 ; tmpA<256; tmpA++ ) for (tmpN=0 ; tmpN<32; tmpN++ ) { tmp = ABS((int)(threshold - tmpA*(1<initcgr.cgr.cs_thres.TA = ta; p_Mcc->initcgr.cgr.cs_thres.Tn = tn; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result))); } PUNLOCK(p_QmPortal); return E_OK; } Index: projects/runtime-coverage/sys/contrib/ncsw/etc/memcpy.c =================================================================== --- projects/runtime-coverage/sys/contrib/ncsw/etc/memcpy.c (revision 325209) +++ projects/runtime-coverage/sys/contrib/ncsw/etc/memcpy.c (revision 325210) @@ -1,620 +1,620 @@ /* * Copyright 2008-2012 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "std_ext.h" #include "xx_ext.h" #include "memcpy_ext.h" void * MemCpy8(void* pDst, void* pSrc, uint32_t size) { - int i; + uint32_t i; for(i = 0; i < size; ++i) *(((uint8_t*)(pDst)) + i) = *(((uint8_t*)(pSrc)) + i); return pDst; } void * MemSet8(void* pDst, int c, uint32_t size) { - int i; + uint32_t i; for(i = 0; i < size; ++i) *(((uint8_t*)(pDst)) + i) = (uint8_t)(c); return pDst; } void * MemCpy32(void* pDst,void* pSrc, uint32_t size) { uint32_t leftAlign; uint32_t rightAlign; uint32_t lastWord; uint32_t currWord; uint32_t *p_Src32; uint32_t *p_Dst32; uint8_t *p_Src8; uint8_t *p_Dst8; p_Src8 = (uint8_t*)(pSrc); p_Dst8 = (uint8_t*)(pDst); /* first copy byte by byte till the source first alignment * this step is necessary to ensure we do not even try to access * data which is before the source buffer, hence it is not ours. */ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */ { *p_Dst8++ = *p_Src8++; size--; } /* align destination (possibly disaligning source)*/ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ { *p_Dst8++ = *p_Src8++; size--; } /* dest is aligned and source is not necessarily aligned */ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */ rightAlign = 32 - leftAlign; if (leftAlign == 0) { /* source is also aligned */ p_Src32 = (uint32_t*)(p_Src8); p_Dst32 = (uint32_t*)(p_Dst8); while (size >> 2) /* size >= 4 */ { *p_Dst32++ = *p_Src32++; size -= 4; } p_Src8 = (uint8_t*)(p_Src32); p_Dst8 = (uint8_t*)(p_Dst32); } else { /* source is not aligned (destination is aligned)*/ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3)); p_Dst32 = (uint32_t*)(p_Dst8); lastWord = *p_Src32++; while(size >> 3) /* size >= 8 */ { currWord = *p_Src32; *p_Dst32 = (lastWord << leftAlign) | (currWord >> rightAlign); lastWord = currWord; p_Src32++; p_Dst32++; size -= 4; } p_Dst8 = (uint8_t*)(p_Dst32); p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3); } /* complete the left overs */ while (size--) *p_Dst8++ = *p_Src8++; return pDst; } void * IO2IOCpy32(void* pDst,void* pSrc, uint32_t size) { uint32_t leftAlign; uint32_t rightAlign; uint32_t lastWord; uint32_t currWord; uint32_t *p_Src32; uint32_t *p_Dst32; uint8_t *p_Src8; uint8_t *p_Dst8; p_Src8 = (uint8_t*)(pSrc); p_Dst8 = (uint8_t*)(pDst); /* first copy byte by byte till the source first alignment * this step is necessary to ensure we do not even try to access * data which is before the source buffer, hence it is not ours. 
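*
* Once both pointers are past the byte-by-byte stage, the destination is
* word aligned and the source may still be off by 0-3 bytes. The word loop
* below then builds each destination word from two adjacent aligned source
* words: with leftAlign = 8*(srcAddr mod 4) and rightAlign = 32 - leftAlign,
*
*     dstWord = (lastWord << leftAlign) | (currWord >> rightAlign);
*
* for example, a source one byte past alignment gives
* dstWord = (lastWord << 8) | (currWord >> 24). (A restatement of the code
* below, added for clarity.)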
*/ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */ { WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8)); p_Dst8++;p_Src8++; size--; } /* align destination (possibly disaligning source)*/ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ { WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8)); p_Dst8++;p_Src8++; size--; } /* dest is aligned and source is not necessarily aligned */ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */ rightAlign = 32 - leftAlign; if (leftAlign == 0) { /* source is also aligned */ p_Src32 = (uint32_t*)(p_Src8); p_Dst32 = (uint32_t*)(p_Dst8); while (size >> 2) /* size >= 4 */ { WRITE_UINT32(*p_Dst32, GET_UINT32(*p_Src32)); p_Dst32++;p_Src32++; size -= 4; } p_Src8 = (uint8_t*)(p_Src32); p_Dst8 = (uint8_t*)(p_Dst32); } else { /* source is not aligned (destination is aligned)*/ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3)); p_Dst32 = (uint32_t*)(p_Dst8); lastWord = GET_UINT32(*p_Src32); p_Src32++; while(size >> 3) /* size >= 8 */ { currWord = GET_UINT32(*p_Src32); WRITE_UINT32(*p_Dst32, (lastWord << leftAlign) | (currWord >> rightAlign)); lastWord = currWord; p_Src32++;p_Dst32++; size -= 4; } p_Dst8 = (uint8_t*)(p_Dst32); p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3); } /* complete the left overs */ while (size--) { WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8)); p_Dst8++;p_Src8++; } return pDst; } void * Mem2IOCpy32(void* pDst,void* pSrc, uint32_t size) { uint32_t leftAlign; uint32_t rightAlign; uint32_t lastWord; uint32_t currWord; uint32_t *p_Src32; uint32_t *p_Dst32; uint8_t *p_Src8; uint8_t *p_Dst8; p_Src8 = (uint8_t*)(pSrc); p_Dst8 = (uint8_t*)(pDst); /* first copy byte by byte till the source first alignment * this step is necessary to ensure we do not even try to access * data which is before the source buffer, hence it is not ours. 
*/ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */ { WRITE_UINT8(*p_Dst8, *p_Src8); p_Dst8++;p_Src8++; size--; } /* align destination (possibly disaligning source)*/ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ { WRITE_UINT8(*p_Dst8, *p_Src8); p_Dst8++;p_Src8++; size--; } /* dest is aligned and source is not necessarily aligned */ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */ rightAlign = 32 - leftAlign; if (leftAlign == 0) { /* source is also aligned */ p_Src32 = (uint32_t*)(p_Src8); p_Dst32 = (uint32_t*)(p_Dst8); while (size >> 2) /* size >= 4 */ { WRITE_UINT32(*p_Dst32, *p_Src32); p_Dst32++;p_Src32++; size -= 4; } p_Src8 = (uint8_t*)(p_Src32); p_Dst8 = (uint8_t*)(p_Dst32); } else { /* source is not aligned (destination is aligned)*/ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3)); p_Dst32 = (uint32_t*)(p_Dst8); lastWord = *p_Src32++; while(size >> 3) /* size >= 8 */ { currWord = *p_Src32; WRITE_UINT32(*p_Dst32, (lastWord << leftAlign) | (currWord >> rightAlign)); lastWord = currWord; p_Src32++;p_Dst32++; size -= 4; } p_Dst8 = (uint8_t*)(p_Dst32); p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3); } /* complete the left overs */ while (size--) { WRITE_UINT8(*p_Dst8, *p_Src8); p_Dst8++;p_Src8++; } return pDst; } void * IO2MemCpy32(void* pDst,void* pSrc, uint32_t size) { uint32_t leftAlign; uint32_t rightAlign; uint32_t lastWord; uint32_t currWord; uint32_t *p_Src32; uint32_t *p_Dst32; uint8_t *p_Src8; uint8_t *p_Dst8; p_Src8 = (uint8_t*)(pSrc); p_Dst8 = (uint8_t*)(pDst); /* first copy byte by byte till the source first alignment * this step is necessary to ensure we do not even try to access * data which is before the source buffer, hence it is not ours. 
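*
* (IO2MemCpy32 below mirrors Mem2IOCpy32 above, covering the
* device-to-memory direction. The four 32-bit copy routines share the same
* alignment-and-merge structure and differ only in the accessors used on
* each side:
*
*     MemCpy32:     plain loads           -> plain stores
*     Mem2IOCpy32:  plain loads           -> WRITE_UINT8/WRITE_UINT32
*     IO2MemCpy32:  GET_UINT8/GET_UINT32  -> plain stores
*     IO2IOCpy32:   GET_UINT8/GET_UINT32  -> WRITE_UINT8/WRITE_UINT32
* )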
*/ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */ { *p_Dst8 = GET_UINT8(*p_Src8); p_Dst8++;p_Src8++; size--; } /* align destination (possibly disaligning source)*/ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ { *p_Dst8 = GET_UINT8(*p_Src8); p_Dst8++;p_Src8++; size--; } /* dest is aligned and source is not necessarily aligned */ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */ rightAlign = 32 - leftAlign; if (leftAlign == 0) { /* source is also aligned */ p_Src32 = (uint32_t*)(p_Src8); p_Dst32 = (uint32_t*)(p_Dst8); while (size >> 2) /* size >= 4 */ { *p_Dst32 = GET_UINT32(*p_Src32); p_Dst32++;p_Src32++; size -= 4; } p_Src8 = (uint8_t*)(p_Src32); p_Dst8 = (uint8_t*)(p_Dst32); } else { /* source is not aligned (destination is aligned)*/ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3)); p_Dst32 = (uint32_t*)(p_Dst8); lastWord = GET_UINT32(*p_Src32); p_Src32++; while(size >> 3) /* size >= 8 */ { currWord = GET_UINT32(*p_Src32); *p_Dst32 = (lastWord << leftAlign) | (currWord >> rightAlign); lastWord = currWord; p_Src32++;p_Dst32++; size -= 4; } p_Dst8 = (uint8_t*)(p_Dst32); p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3); } /* complete the left overs */ while (size--) { *p_Dst8 = GET_UINT8(*p_Src8); p_Dst8++;p_Src8++; } return pDst; } void * MemCpy64(void* pDst,void* pSrc, uint32_t size) { uint32_t leftAlign; uint32_t rightAlign; uint64_t lastWord; uint64_t currWord; uint64_t *pSrc64; uint64_t *pDst64; uint8_t *p_Src8; uint8_t *p_Dst8; p_Src8 = (uint8_t*)(pSrc); p_Dst8 = (uint8_t*)(pDst); /* first copy byte by byte till the source first alignment * this step is necessarily to ensure we do not even try to access * data which is before the source buffer, hence it is not ours. 
*/ while((PTR_TO_UINT(p_Src8) & 7) && size) /* (pSrc mod 8) > 0 and size > 0 */ { *p_Dst8++ = *p_Src8++; size--; } /* align destination (possibly disaligning source)*/ while((PTR_TO_UINT(p_Dst8) & 7) && size) /* (pDst mod 8) > 0 and size > 0 */ { *p_Dst8++ = *p_Src8++; size--; } /* dest is aligned and source is not necessarily aligned */ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 7) << 3); /* leftAlign = (pSrc mod 8)*8 */ rightAlign = 64 - leftAlign; if (leftAlign == 0) { /* source is also aligned */ pSrc64 = (uint64_t*)(p_Src8); pDst64 = (uint64_t*)(p_Dst8); while (size >> 3) /* size >= 8 */ { *pDst64++ = *pSrc64++; size -= 8; } p_Src8 = (uint8_t*)(pSrc64); p_Dst8 = (uint8_t*)(pDst64); } else { /* source is not aligned (destination is aligned)*/ pSrc64 = (uint64_t*)(p_Src8 - (leftAlign >> 3)); pDst64 = (uint64_t*)(p_Dst8); lastWord = *pSrc64++; while(size >> 4) /* size >= 16 */ { currWord = *pSrc64; *pDst64 = (lastWord << leftAlign) | (currWord >> rightAlign); lastWord = currWord; pSrc64++; pDst64++; size -= 8; } p_Dst8 = (uint8_t*)(pDst64); p_Src8 = (uint8_t*)(pSrc64) - 8 + (leftAlign >> 3); } /* complete the left overs */ while (size--) *p_Dst8++ = *p_Src8++; return pDst; } void * MemSet32(void* pDst, uint8_t val, uint32_t size) { uint32_t val32; uint32_t *p_Dst32; uint8_t *p_Dst8; p_Dst8 = (uint8_t*)(pDst); /* generate four 8-bit val's in 32-bit container */ val32 = (uint32_t) val; val32 |= (val32 << 8); val32 |= (val32 << 16); /* align destination to 32 */ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ { *p_Dst8++ = val; size--; } /* 32-bit chunks */ p_Dst32 = (uint32_t*)(p_Dst8); while (size >> 2) /* size >= 4 */ { *p_Dst32++ = val32; size -= 4; } /* complete the leftovers */ p_Dst8 = (uint8_t*)(p_Dst32); while (size--) *p_Dst8++ = val; return pDst; } void * IOMemSet32(void* pDst, uint8_t val, uint32_t size) { uint32_t val32; uint32_t *p_Dst32; uint8_t *p_Dst8; p_Dst8 = (uint8_t*)(pDst); /* generate four 8-bit val's in 32-bit container */ val32 = (uint32_t) val; val32 |= (val32 << 8); val32 |= (val32 << 16); /* align destination to 32 */ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ { WRITE_UINT8(*p_Dst8, val); p_Dst8++; size--; } /* 32-bit chunks */ p_Dst32 = (uint32_t*)(p_Dst8); while (size >> 2) /* size >= 4 */ { WRITE_UINT32(*p_Dst32, val32); p_Dst32++; size -= 4; } /* complete the leftovers */ p_Dst8 = (uint8_t*)(p_Dst32); while (size--) { WRITE_UINT8(*p_Dst8, val); p_Dst8++; } return pDst; } void * MemSet64(void* pDst, uint8_t val, uint32_t size) { uint64_t val64; uint64_t *pDst64; uint8_t *p_Dst8; p_Dst8 = (uint8_t*)(pDst); /* generate four 8-bit val's in 32-bit container */ val64 = (uint64_t) val; val64 |= (val64 << 8); val64 |= (val64 << 16); val64 |= (val64 << 24); val64 |= (val64 << 32); /* align destination to 64 */ while((PTR_TO_UINT(p_Dst8) & 7) && size) /* (pDst mod 8) > 0 and size > 0 */ { *p_Dst8++ = val; size--; } /* 64-bit chunks */ pDst64 = (uint64_t*)(p_Dst8); while (size >> 4) /* size >= 8 */ { *pDst64++ = val64; size -= 8; } /* complete the leftovers */ p_Dst8 = (uint8_t*)(pDst64); while (size--) *p_Dst8++ = val; return pDst; } void MemDisp(uint8_t *p, int size) { uint32_t space = (uint32_t)(PTR_TO_UINT(p) & 0x3); uint8_t *p_Limit; if (space) { p_Limit = (p - space + 4); XX_Print("0x%08X: ", (p - space)); while (space--) { XX_Print("--"); } while (size && (p < p_Limit)) { XX_Print("%02x", *(uint8_t*)p); size--; p++; } XX_Print(" "); p_Limit += 12; while ((size > 3) && (p < p_Limit)) { 
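/* (On MemSet32()/IOMemSet32()/MemSet64() above: the fill byte is
 * replicated across the word before the wide stores begin, e.g. for
 * val = 0xAB:
 *
 *     val32  = 0xAB;
 *     val32 |= val32 << 8;     now 0x0000ABAB
 *     val32 |= val32 << 16;    now 0xABABABAB
 *
 * so every aligned store writes four (or, in MemSet64, eight) identical
 * bytes at once; a worked example of the shifts used above.) */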
XX_Print("%08x ", *(uint32_t*)p); size -= 4; p += 4; } XX_Print("\r\n"); } while (size > 15) { XX_Print("0x%08X: %08x %08x %08x %08x\r\n", p, *(uint32_t *)p, *(uint32_t *)(p + 4), *(uint32_t *)(p + 8), *(uint32_t *)(p + 12)); size -= 16; p += 16; } if (size) { XX_Print("0x%08X: ", p); while (size > 3) { XX_Print("%08x ", *(uint32_t *)p); size -= 4; p += 4; } while (size) { XX_Print("%02x", *(uint8_t *)p); size--; p++; } XX_Print("\r\n"); } } Index: projects/runtime-coverage/sys/dev/dpaa/fman.c =================================================================== --- projects/runtime-coverage/sys/dev/dpaa/fman.c (revision 325209) +++ projects/runtime-coverage/sys/dev/dpaa/fman.c (revision 325210) @@ -1,408 +1,586 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include +#include + #include "opt_platform.h" #include #include #include #include #include "fman.h" static MALLOC_DEFINE(M_FMAN, "fman", "fman devices information"); /** * @group FMan private defines. * @{ */ enum fman_irq_enum { FMAN_IRQ_NUM = 0, FMAN_ERR_IRQ_NUM = 1 }; enum fman_mu_ram_map { FMAN_MURAM_OFF = 0x0, FMAN_MURAM_SIZE = 0x28000 }; struct fman_config { device_t fman_device; uintptr_t mem_base_addr; uintptr_t irq_num; uintptr_t err_irq_num; uint8_t fm_id; t_FmExceptionsCallback *exception_callback; t_FmBusErrorCallback *bus_error_callback; }; /** * @group FMan private methods/members. * @{ */ /** * Frame Manager firmware. * We use the same firmware for both P3041 and P2041 devices. 
*/ const uint32_t fman_firmware[] = FMAN_UC_IMG; const uint32_t fman_firmware_size = sizeof(fman_firmware); static struct fman_softc *fm_sc = NULL; +int +fman_activate_resource(device_t bus, device_t child, int type, int rid, + struct resource *res) +{ + struct fman_softc *sc; + bus_space_tag_t bt; + bus_space_handle_t bh; + int i, rv; + + sc = device_get_softc(bus); + if (type != SYS_RES_IRQ) { + for (i = 0; i < sc->sc_base.nranges; i++) { + if (rman_is_region_manager(res, &sc->rman) != 0) { + bt = rman_get_bustag(sc->mem_res); + rv = bus_space_subregion(bt, + rman_get_bushandle(sc->mem_res), + rman_get_start(res) - + rman_get_start(sc->mem_res), + rman_get_size(res), &bh); + if (rv != 0) + return (rv); + rman_set_bustag(res, bt); + rman_set_bushandle(res, bh); + return (rman_activate_resource(res)); + } + } + return (EINVAL); + } + return (bus_generic_activate_resource(bus, child, type, rid, res)); +} + +int +fman_release_resource(device_t bus, device_t child, int type, int rid, + struct resource *res) +{ + struct fman_softc *sc; + struct resource_list *rl; + struct resource_list_entry *rle; + int passthrough, rv; + + passthrough = (device_get_parent(child) != bus); + rl = BUS_GET_RESOURCE_LIST(bus, child); + sc = device_get_softc(bus); + if (type != SYS_RES_IRQ) { + if ((rman_get_flags(res) & RF_ACTIVE) != 0 ){ + rv = bus_deactivate_resource(child, type, rid, res); + if (rv != 0) + return (rv); + } + rv = rman_release_resource(res); + if (rv != 0) + return (rv); + if (!passthrough) { + rle = resource_list_find(rl, type, rid); + KASSERT(rle != NULL, + ("%s: resource entry not found!", __func__)); + KASSERT(rle->res != NULL, + ("%s: resource entry is not busy", __func__)); + rle->res = NULL; + } + return (0); + } + return (resource_list_release(rl, bus, child, type, rid, res)); +} + +struct resource * +fman_alloc_resource(device_t bus, device_t child, int type, int *rid, + rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) +{ + struct fman_softc *sc; + struct resource_list *rl; + struct resource_list_entry *rle = NULL; + struct resource *res; + int i, isdefault, passthrough; + + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); + passthrough = (device_get_parent(child) != bus); + sc = device_get_softc(bus); + rl = BUS_GET_RESOURCE_LIST(bus, child); + switch (type) { + case SYS_RES_MEMORY: + KASSERT(!(isdefault && passthrough), + ("%s: passthrough of default allocation", __func__)); + if (!passthrough) { + rle = resource_list_find(rl, type, *rid); + if (rle == NULL) + return (NULL); + KASSERT(rle->res == NULL, + ("%s: resource entry is busy", __func__)); + if (isdefault) { + start = rle->start; + count = ulmax(count, rle->count); + end = ulmax(rle->end, start + count - 1); + } + } + + res = NULL; + /* Map fman ranges to nexus ranges. 
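 * (Editor's illustration, values hypothetical: given a range entry
 * { bus = 0x0, size = 0x100000 } and mem_res starting at 0xffe400000,
 * a child request for bus addresses 0xe0000-0xe0fff is re-based by
 * rman_get_start(sc->mem_res) to 0xffe4e0000-0xffe4e0fff before being
 * reserved from the local rman.)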
*/ + for (i = 0; i < sc->sc_base.nranges; i++) { + if (start >= sc->sc_base.ranges[i].bus && end < + sc->sc_base.ranges[i].bus + sc->sc_base.ranges[i].size) { + start += rman_get_start(sc->mem_res); + end += rman_get_start(sc->mem_res); + res = rman_reserve_resource(&sc->rman, start, + end, count, flags & ~RF_ACTIVE, child); + if (res == NULL) + return (NULL); + rman_set_rid(res, *rid); + if ((flags & RF_ACTIVE) != 0 && bus_activate_resource( + child, type, *rid, res) != 0) { + rman_release_resource(res); + return (NULL); + } + break; + } + } + if (!passthrough) + rle->res = res; + return (res); + case SYS_RES_IRQ: + return (resource_list_alloc(rl, bus, child, type, rid, start, + end, count, flags)); + } + return (NULL); +} + +static int +fman_fill_ranges(phandle_t node, struct simplebus_softc *sc) +{ + int host_address_cells; + cell_t *base_ranges; + ssize_t nbase_ranges; + int err; + int i, j, k; + + err = OF_searchencprop(OF_parent(node), "#address-cells", + &host_address_cells, sizeof(host_address_cells)); + if (err <= 0) + return (-1); + + nbase_ranges = OF_getproplen(node, "ranges"); + if (nbase_ranges < 0) + return (-1); + sc->nranges = nbase_ranges / sizeof(cell_t) / + (sc->acells + host_address_cells + sc->scells); + if (sc->nranges == 0) + return (0); + + sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), + M_DEVBUF, M_WAITOK); + base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); + OF_getencprop(node, "ranges", base_ranges, nbase_ranges); + + for (i = 0, j = 0; i < sc->nranges; i++) { + sc->ranges[i].bus = 0; + for (k = 0; k < sc->acells; k++) { + sc->ranges[i].bus <<= 32; + sc->ranges[i].bus |= base_ranges[j++]; + } + sc->ranges[i].host = 0; + for (k = 0; k < host_address_cells; k++) { + sc->ranges[i].host <<= 32; + sc->ranges[i].host |= base_ranges[j++]; + } + sc->ranges[i].size = 0; + for (k = 0; k < sc->scells; k++) { + sc->ranges[i].size <<= 32; + sc->ranges[i].size |= base_ranges[j++]; + } + } + + free(base_ranges, M_DEVBUF); + return (sc->nranges); +} + static t_Handle fman_init(struct fman_softc *sc, struct fman_config *cfg) { - struct ofw_bus_devinfo obd; phandle_t node; t_FmParams fm_params; t_Handle muram_handle, fm_handle; t_Error error; t_FmRevisionInfo revision_info; uint16_t clock; uint32_t tmp, mod; /* MURAM configuration */ muram_handle = FM_MURAM_ConfigAndInit(cfg->mem_base_addr + FMAN_MURAM_OFF, FMAN_MURAM_SIZE); if (muram_handle == NULL) { device_printf(cfg->fman_device, "couldn't init FM MURAM module" "\n"); return (NULL); } sc->muram_handle = muram_handle; /* Fill in FM configuration */ fm_params.fmId = cfg->fm_id; /* XXX we support only one partition thus each fman has master id */ fm_params.guestId = NCSW_MASTER_ID; fm_params.baseAddr = cfg->mem_base_addr; fm_params.h_FmMuram = muram_handle; /* Get FMan clock in Hz */ if ((tmp = fman_get_clock(sc)) == 0) return (NULL); /* Convert FMan clock to MHz */ clock = (uint16_t)(tmp / 1000000); mod = tmp % 1000000; if (mod >= 500000) ++clock; fm_params.fmClkFreq = clock; fm_params.f_Exception = cfg->exception_callback; fm_params.f_BusError = cfg->bus_error_callback; fm_params.h_App = cfg->fman_device; fm_params.irq = cfg->irq_num; fm_params.errIrq = cfg->err_irq_num; fm_params.firmware.size = fman_firmware_size; fm_params.firmware.p_Code = (uint32_t*)fman_firmware; fm_handle = FM_Config(&fm_params); if (fm_handle == NULL) { device_printf(cfg->fman_device, "couldn't configure FM " "module\n"); goto err; } FM_ConfigResetOnInit(fm_handle, TRUE); error = FM_Init(fm_handle); if (error != E_OK) { 
device_printf(cfg->fman_device, "couldn't init FM module\n"); goto err2; } error = FM_GetRevision(fm_handle, &revision_info); if (error != E_OK) { device_printf(cfg->fman_device, "couldn't get FM revision\n"); goto err2; } device_printf(cfg->fman_device, "Hardware version: %d.%d.\n", revision_info.majorRev, revision_info.minorRev); /* Initialize the simplebus part of things */ simplebus_init(sc->sc_base.dev, 0); node = ofw_bus_get_node(sc->sc_base.dev); + fman_fill_ranges(node, &sc->sc_base); + sc->rman.rm_type = RMAN_ARRAY; + sc->rman.rm_descr = "FMan range"; + rman_init_from_resource(&sc->rman, sc->mem_res); for (node = OF_child(node); node > 0; node = OF_peer(node)) { - if (ofw_bus_gen_setup_devinfo(&obd, node) != 0) - continue; simplebus_add_device(sc->sc_base.dev, node, 0, NULL, -1, NULL); } return (fm_handle); err2: FM_Free(fm_handle); err: FM_MURAM_Free(muram_handle); return (NULL); } static void fman_exception_callback(t_Handle app_handle, e_FmExceptions exception) { struct fman_softc *sc; sc = app_handle; device_printf(sc->sc_base.dev, "FMan exception occurred.\n"); } static void fman_error_callback(t_Handle app_handle, e_FmPortType port_type, uint8_t port_id, uint64_t addr, uint8_t tnum, uint16_t liodn) { struct fman_softc *sc; sc = app_handle; device_printf(sc->sc_base.dev, "FMan error occurred.\n"); } /** @} */ /** * @group FMan driver interface. * @{ */ int fman_get_handle(t_Handle *fmh) { if (fm_sc == NULL) return (ENOMEM); *fmh = fm_sc->fm_handle; return (0); } int fman_get_muram_handle(t_Handle *muramh) { if (fm_sc == NULL) return (ENOMEM); *muramh = fm_sc->muram_handle; return (0); } int fman_get_bushandle(vm_offset_t *fm_base) { if (fm_sc == NULL) return (ENOMEM); *fm_base = rman_get_bushandle(fm_sc->mem_res); return (0); } int fman_get_dev(device_t *fm_dev) { if (fm_sc == NULL) return (ENOMEM); *fm_dev = fm_sc->sc_base.dev; return (0); } int fman_attach(device_t dev) { struct fman_softc *sc; struct fman_config cfg; pcell_t qchan_range[2]; phandle_t node; sc = device_get_softc(dev); sc->sc_base.dev = dev; fm_sc = sc; /* Check if MallocSmart allocator is ready */ if (XX_MallocSmartInit() != E_OK) { device_printf(dev, "could not initialize smart allocator.\n"); return (ENXIO); } node = ofw_bus_get_node(dev); if (OF_getencprop(node, "fsl,qman-channel-range", qchan_range, sizeof(qchan_range)) <= 0) { device_printf(dev, "Missing QMan channel range property!\n"); return (ENXIO); } sc->qman_chan_base = qchan_range[0]; sc->qman_chan_count = qchan_range[1]; sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->mem_res) { device_printf(dev, "could not allocate memory.\n"); return (ENXIO); } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "could not allocate interrupt.\n"); goto err; } /* * XXX: Fix FMan interrupt. This is workaround for the issue with * interrupts directed to multiple CPUs by the interrupts subsystem. * Workaround is to bind the interrupt to only one CPU0. 
*/ XX_FmanFixIntr(rman_get_start(sc->irq_res)); sc->err_irq_rid = 1; sc->err_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->err_irq_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->err_irq_res) { device_printf(dev, "could not allocate error interrupt.\n"); goto err; } /* Set FMan configuration */ cfg.fman_device = dev; cfg.fm_id = device_get_unit(dev); cfg.mem_base_addr = rman_get_bushandle(sc->mem_res); cfg.irq_num = (uintptr_t)sc->irq_res; cfg.err_irq_num = (uintptr_t)sc->err_irq_res; cfg.exception_callback = fman_exception_callback; cfg.bus_error_callback = fman_error_callback; sc->fm_handle = fman_init(sc, &cfg); if (sc->fm_handle == NULL) { device_printf(dev, "could not be configured\n"); return (ENXIO); } return (bus_generic_attach(dev)); err: fman_detach(dev); return (ENXIO); } int fman_detach(device_t dev) { struct fman_softc *sc; sc = device_get_softc(dev); if (sc->muram_handle) { FM_MURAM_Free(sc->muram_handle); } if (sc->fm_handle) { FM_Free(sc->fm_handle); } if (sc->mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); } if (sc->err_irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->err_irq_rid, sc->err_irq_res); } return (0); } int fman_suspend(device_t dev) { return (0); } int fman_resume_dev(device_t dev) { return (0); } int fman_shutdown(device_t dev) { return (0); } int fman_qman_channel_id(device_t dev, int port) { struct fman_softc *sc; int qman_port_id[] = {0x31, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}; int i; sc = device_get_softc(dev); for (i = 0; i < sc->qman_chan_count; i++) { if (qman_port_id[i] == port) return (sc->qman_chan_base + i); } return (0); } /** @} */ Index: projects/runtime-coverage/sys/dev/dpaa/fman.h =================================================================== --- projects/runtime-coverage/sys/dev/dpaa/fman.h (revision 325209) +++ projects/runtime-coverage/sys/dev/dpaa/fman.h (revision 325210) @@ -1,73 +1,80 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef FMAN_H_ #define FMAN_H_ #include /** * FMan driver instance data.
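 * (Editor's note on the member added below: rman is the resource manager
 * introduced by this revision; it carves child MAC register windows out
 * of the memory already held in mem_res, which is why fman_alloc_resource()
 * reserves from it instead of asking the parent bus.)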
*/ struct fman_softc { struct simplebus_softc sc_base; struct resource *mem_res; struct resource *irq_res; struct resource *err_irq_res; + struct rman rman; int mem_rid; int irq_rid; int err_irq_rid; int qman_chan_base; int qman_chan_count; t_Handle fm_handle; t_Handle muram_handle; }; /** * @group QMan bus interface. * @{ */ +struct resource * fman_alloc_resource(device_t bus, device_t child, int type, + int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); +int fman_activate_resource(device_t bus, device_t child, + int type, int rid, struct resource *res); +int fman_release_resource(device_t bus, device_t child, int type, int rid, + struct resource *res); int fman_attach(device_t dev); int fman_detach(device_t dev); int fman_suspend(device_t dev); int fman_resume_dev(device_t dev); int fman_shutdown(device_t dev); int fman_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); int fman_qman_channel_id(device_t, int); /** @} */ uint32_t fman_get_clock(struct fman_softc *sc); int fman_get_handle(t_Handle *fmh); int fman_get_muram_handle(t_Handle *muramh); int fman_get_bushandle(vm_offset_t *fm_base); int fman_get_dev(device_t *fmd); #endif /* FMAN_H_ */ Index: projects/runtime-coverage/sys/dev/dpaa/fman_fdt.c =================================================================== --- projects/runtime-coverage/sys/dev/dpaa/fman_fdt.c (revision 325209) +++ projects/runtime-coverage/sys/dev/dpaa/fman_fdt.c (revision 325210) @@ -1,101 +1,104 @@ /*- * Copyright (c) 2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "fman.h" #define FFMAN_DEVSTR "Freescale Frame Manager" static int fman_fdt_probe(device_t dev); static device_method_t fman_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fman_fdt_probe), DEVMETHOD(device_attach, fman_attach), DEVMETHOD(device_detach, fman_detach), DEVMETHOD(device_shutdown, fman_shutdown), DEVMETHOD(device_suspend, fman_suspend), DEVMETHOD(device_resume, fman_resume_dev), + DEVMETHOD(bus_alloc_resource, fman_alloc_resource), + DEVMETHOD(bus_activate_resource, fman_activate_resource), + DEVMETHOD(bus_release_resource, fman_release_resource), { 0, 0 } }; DEFINE_CLASS_1(fman, fman_driver, fman_methods, sizeof(struct fman_softc), simplebus_driver); static devclass_t fman_devclass; EARLY_DRIVER_MODULE(fman, simplebus, fman_driver, fman_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); static int fman_fdt_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "fsl,fman")) return (ENXIO); device_set_desc(dev, FFMAN_DEVSTR); return (BUS_PROBE_DEFAULT); } uint32_t fman_get_clock(struct fman_softc *sc) { device_t dev; phandle_t node; pcell_t fman_clock; dev = sc->sc_base.dev; node = ofw_bus_get_node(dev); if ((OF_getprop(node, "clock-frequency", &fman_clock, sizeof(fman_clock)) <= 0) || (fman_clock == 0)) { device_printf(dev, "could not acquire correct frequency " "from DTS\n"); return (0); } return ((uint32_t)fman_clock); } Index: projects/runtime-coverage/sys/dev/dpaa/if_dtsec.c =================================================================== --- projects/runtime-coverage/sys/dev/dpaa/if_dtsec.c (revision 325209) +++ projects/runtime-coverage/sys/dev/dpaa/if_dtsec.c (revision 325210) @@ -1,828 +1,828 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miibus_if.h" #include #include #include #include #include "fman.h" #include "if_dtsec.h" #include "if_dtsec_im.h" #include "if_dtsec_rm.h" /** * @group dTSEC private defines. 
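 * (Editor's note: the exception table in this group is a linear,
 * sentinel-terminated lookup; dtsec_fm_mac_ex_to_str() scans until it
 * either matches the exception number or hits DTSEC_MAC_EXCEPTIONS_END,
 * so entries can be appended without maintaining a size constant.)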
* @{ */ /** * dTSEC FMan MAC exceptions info struct. */ struct dtsec_fm_mac_ex_str { const int num; const char *str; }; /** @} */ /** * @group FMan MAC routines. * @{ */ #define DTSEC_MAC_EXCEPTIONS_END (-1) /** * FMan MAC exceptions. */ static const struct dtsec_fm_mac_ex_str dtsec_fm_mac_exceptions[] = { { e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO, "MDIO scan event" }, { e_FM_MAC_EX_10G_MDIO_CMD_CMPL, "MDIO command completion" }, { e_FM_MAC_EX_10G_REM_FAULT, "Remote fault" }, { e_FM_MAC_EX_10G_LOC_FAULT, "Local fault" }, { e_FM_MAC_EX_10G_1TX_ECC_ER, "Transmit frame ECC error" }, { e_FM_MAC_EX_10G_TX_FIFO_UNFL, "Transmit FIFO underflow" }, { e_FM_MAC_EX_10G_TX_FIFO_OVFL, "Transmit FIFO overflow" }, { e_FM_MAC_EX_10G_TX_ER, "Transmit frame error" }, { e_FM_MAC_EX_10G_RX_FIFO_OVFL, "Receive FIFO overflow" }, { e_FM_MAC_EX_10G_RX_ECC_ER, "Receive frame ECC error" }, { e_FM_MAC_EX_10G_RX_JAB_FRM, "Receive jabber frame" }, { e_FM_MAC_EX_10G_RX_OVRSZ_FRM, "Receive oversized frame" }, { e_FM_MAC_EX_10G_RX_RUNT_FRM, "Receive runt frame" }, { e_FM_MAC_EX_10G_RX_FRAG_FRM, "Receive fragment frame" }, { e_FM_MAC_EX_10G_RX_LEN_ER, "Receive payload length error" }, { e_FM_MAC_EX_10G_RX_CRC_ER, "Receive CRC error" }, { e_FM_MAC_EX_10G_RX_ALIGN_ER, "Receive alignment error" }, { e_FM_MAC_EX_1G_BAB_RX, "Babbling receive error" }, { e_FM_MAC_EX_1G_RX_CTL, "Receive control (pause frame) interrupt" }, { e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET, "Graceful transmit stop " "complete" }, { e_FM_MAC_EX_1G_BAB_TX, "Babbling transmit error" }, { e_FM_MAC_EX_1G_TX_CTL, "Transmit control (pause frame) interrupt" }, { e_FM_MAC_EX_1G_TX_ERR, "Transmit error" }, { e_FM_MAC_EX_1G_LATE_COL, "Late collision" }, { e_FM_MAC_EX_1G_COL_RET_LMT, "Collision retry limit" }, { e_FM_MAC_EX_1G_TX_FIFO_UNDRN, "Transmit FIFO underrun" }, { e_FM_MAC_EX_1G_MAG_PCKT, "Magic Packet detected when dTSEC is in " "Magic Packet detection mode" }, { e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET, "MII management read completion" }, { e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET, "MII management write completion" }, { e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET, "Graceful receive stop " "complete" }, { e_FM_MAC_EX_1G_TX_DATA_ERR, "Internal data error on transmit" }, { e_FM_MAC_EX_1G_RX_DATA_ERR, "Internal data error on receive" }, { e_FM_MAC_EX_1G_1588_TS_RX_ERR, "Time-Stamp Receive Error" }, { e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, "MIB counter overflow" }, { DTSEC_MAC_EXCEPTIONS_END, "" } }; static const char * dtsec_fm_mac_ex_to_str(e_FmMacExceptions exception) { int i; for (i = 0; dtsec_fm_mac_exceptions[i].num != exception && dtsec_fm_mac_exceptions[i].num != DTSEC_MAC_EXCEPTIONS_END; ++i) ; if (dtsec_fm_mac_exceptions[i].num == DTSEC_MAC_EXCEPTIONS_END) return (""); return (dtsec_fm_mac_exceptions[i].str); } static void dtsec_fm_mac_mdio_event_callback(t_Handle h_App, e_FmMacExceptions exception) { struct dtsec_softc *sc; sc = h_App; device_printf(sc->sc_dev, "MDIO event %i: %s.\n", exception, dtsec_fm_mac_ex_to_str(exception)); } static void dtsec_fm_mac_exception_callback(t_Handle app, e_FmMacExceptions exception) { struct dtsec_softc *sc; sc = app; device_printf(sc->sc_dev, "MAC exception %i: %s.\n", exception, dtsec_fm_mac_ex_to_str(exception)); } static void dtsec_fm_mac_free(struct dtsec_softc *sc) { if (sc->sc_mach == NULL) return; FM_MAC_Disable(sc->sc_mach, e_COMM_MODE_RX_AND_TX); FM_MAC_Free(sc->sc_mach); sc->sc_mach = NULL; } static int dtsec_fm_mac_init(struct dtsec_softc *sc, uint8_t *mac) { t_FmMacParams params; t_Error error; memset(&params, 0, sizeof(params));
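	/*
	 * Editor's note: params is zeroed first so every t_FmMacParams field
	 * not assigned below reaches FM_MAC_Config() as 0/NULL.  The hunk
	 * that follows also switches baseAddr from the hand-computed
	 * sc_fm_base + sc_mac_mem_offset to the bus handle of the per-MAC
	 * memory resource now allocated in dtsec_fdt_attach().
	 */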
memcpy(¶ms.addr, mac, sizeof(params.addr)); - params.baseAddr = sc->sc_fm_base + sc->sc_mac_mem_offset; + params.baseAddr = rman_get_bushandle(sc->sc_mem); params.enetMode = sc->sc_mac_enet_mode; params.macId = sc->sc_eth_id; params.mdioIrq = sc->sc_mac_mdio_irq; params.f_Event = dtsec_fm_mac_mdio_event_callback; params.f_Exception = dtsec_fm_mac_exception_callback; params.h_App = sc; params.h_Fm = sc->sc_fmh; sc->sc_mach = FM_MAC_Config(¶ms); if (sc->sc_mach == NULL) { device_printf(sc->sc_dev, "couldn't configure FM_MAC module.\n" ); return (ENXIO); } error = FM_MAC_ConfigResetOnInit(sc->sc_mach, TRUE); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't enable reset on init " "feature.\n"); dtsec_fm_mac_free(sc); return (ENXIO); } /* Do not inform about pause frames */ error = FM_MAC_ConfigException(sc->sc_mach, e_FM_MAC_EX_1G_RX_CTL, FALSE); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't disable pause frames " "exception.\n"); dtsec_fm_mac_free(sc); return (ENXIO); } error = FM_MAC_Init(sc->sc_mach); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't initialize FM_MAC module." "\n"); dtsec_fm_mac_free(sc); return (ENXIO); } return (0); } /** @} */ /** * @group FMan PORT routines. * @{ */ static const char * dtsec_fm_port_ex_to_str(e_FmPortExceptions exception) { switch (exception) { case e_FM_PORT_EXCEPTION_IM_BUSY: return ("IM: RX busy"); default: return (""); } } void dtsec_fm_port_rx_exception_callback(t_Handle app, e_FmPortExceptions exception) { struct dtsec_softc *sc; sc = app; device_printf(sc->sc_dev, "RX exception: %i: %s.\n", exception, dtsec_fm_port_ex_to_str(exception)); } void dtsec_fm_port_tx_exception_callback(t_Handle app, e_FmPortExceptions exception) { struct dtsec_softc *sc; sc = app; device_printf(sc->sc_dev, "TX exception: %i: %s.\n", exception, dtsec_fm_port_ex_to_str(exception)); } e_FmPortType dtsec_fm_port_rx_type(enum eth_dev_type type) { switch (type) { case ETH_DTSEC: return (e_FM_PORT_TYPE_RX); case ETH_10GSEC: return (e_FM_PORT_TYPE_RX_10G); default: return (e_FM_PORT_TYPE_DUMMY); } } e_FmPortType dtsec_fm_port_tx_type(enum eth_dev_type type) { switch (type) { case ETH_DTSEC: return (e_FM_PORT_TYPE_TX); case ETH_10GSEC: return (e_FM_PORT_TYPE_TX_10G); default: return (e_FM_PORT_TYPE_DUMMY); } } static void dtsec_fm_port_free_both(struct dtsec_softc *sc) { if (sc->sc_rxph) { FM_PORT_Free(sc->sc_rxph); sc->sc_rxph = NULL; } if (sc->sc_txph) { FM_PORT_Free(sc->sc_txph); sc->sc_txph = NULL; } } /** @} */ /** * @group IFnet routines. 
* @{ */ static int dtsec_if_enable_locked(struct dtsec_softc *sc) { int error; DTSEC_LOCK_ASSERT(sc); error = FM_MAC_Enable(sc->sc_mach, e_COMM_MODE_RX_AND_TX); if (error != E_OK) return (EIO); error = FM_PORT_Enable(sc->sc_rxph); if (error != E_OK) return (EIO); error = FM_PORT_Enable(sc->sc_txph); if (error != E_OK) return (EIO); sc->sc_ifnet->if_drv_flags |= IFF_DRV_RUNNING; /* Refresh link state */ dtsec_miibus_statchg(sc->sc_dev); return (0); } static int dtsec_if_disable_locked(struct dtsec_softc *sc) { int error; DTSEC_LOCK_ASSERT(sc); error = FM_MAC_Disable(sc->sc_mach, e_COMM_MODE_RX_AND_TX); if (error != E_OK) return (EIO); error = FM_PORT_Disable(sc->sc_rxph); if (error != E_OK) return (EIO); error = FM_PORT_Disable(sc->sc_txph); if (error != E_OK) return (EIO); sc->sc_ifnet->if_drv_flags &= ~IFF_DRV_RUNNING; return (0); } static int dtsec_if_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct dtsec_softc *sc; struct ifreq *ifr; int error; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; /* Basic functionality to achieve media status reports */ switch (command) { case SIOCSIFFLAGS: DTSEC_LOCK(sc); if (sc->sc_ifnet->if_flags & IFF_UP) error = dtsec_if_enable_locked(sc); else error = dtsec_if_disable_locked(sc); DTSEC_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } static void dtsec_if_tick(void *arg) { struct dtsec_softc *sc; sc = arg; /* TODO */ DTSEC_LOCK(sc); mii_tick(sc->sc_mii); callout_reset(&sc->sc_tick_callout, hz, dtsec_if_tick, sc); DTSEC_UNLOCK(sc); } static void dtsec_if_deinit_locked(struct dtsec_softc *sc) { DTSEC_LOCK_ASSERT(sc); DTSEC_UNLOCK(sc); callout_drain(&sc->sc_tick_callout); DTSEC_LOCK(sc); } static void dtsec_if_init_locked(struct dtsec_softc *sc) { int error; DTSEC_LOCK_ASSERT(sc); /* Set MAC address */ error = FM_MAC_ModifyMacAddr(sc->sc_mach, (t_EnetAddr *)IF_LLADDR(sc->sc_ifnet)); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't set MAC address.\n"); goto err; } /* Start MII polling */ if (sc->sc_mii) callout_reset(&sc->sc_tick_callout, hz, dtsec_if_tick, sc); if (sc->sc_ifnet->if_flags & IFF_UP) { error = dtsec_if_enable_locked(sc); if (error != 0) goto err; } else { error = dtsec_if_disable_locked(sc); if (error != 0) goto err; } return; err: dtsec_if_deinit_locked(sc); device_printf(sc->sc_dev, "initialization error.\n"); return; } static void dtsec_if_init(void *data) { struct dtsec_softc *sc; sc = data; DTSEC_LOCK(sc); dtsec_if_init_locked(sc); DTSEC_UNLOCK(sc); } static void dtsec_if_start(struct ifnet *ifp) { struct dtsec_softc *sc; sc = ifp->if_softc; DTSEC_LOCK(sc); sc->sc_start_locked(sc); DTSEC_UNLOCK(sc); } static void dtsec_if_watchdog(struct ifnet *ifp) { /* TODO */ } /** @} */ /** * @group IFmedia routines. * @{ */ static int dtsec_ifmedia_upd(struct ifnet *ifp) { struct dtsec_softc *sc = ifp->if_softc; DTSEC_LOCK(sc); mii_mediachg(sc->sc_mii); DTSEC_UNLOCK(sc); return (0); } static void dtsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct dtsec_softc *sc = ifp->if_softc; DTSEC_LOCK(sc); mii_pollstat(sc->sc_mii); ifmr->ifm_active = sc->sc_mii->mii_media_active; ifmr->ifm_status = sc->sc_mii->mii_media_status; DTSEC_UNLOCK(sc); } /** @} */ /** * @group dTSEC bus interface. 
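 * (Editor's usage note for dtsec_configure_mode() below: the mode is a
 * boot-time tunable keyed on the device name and unit, so a line such as
 *     dtsec0.independent_mode=1
 * in loader.conf(5) selects independent mode for dtsec0; the value shown
 * is an example, the default remains regular mode.)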
* @{ */ static void dtsec_configure_mode(struct dtsec_softc *sc) { char tunable[64]; snprintf(tunable, sizeof(tunable), "%s.independent_mode", device_get_nameunit(sc->sc_dev)); sc->sc_mode = DTSEC_MODE_REGULAR; TUNABLE_INT_FETCH(tunable, &sc->sc_mode); if (sc->sc_mode == DTSEC_MODE_REGULAR) { sc->sc_port_rx_init = dtsec_rm_fm_port_rx_init; sc->sc_port_tx_init = dtsec_rm_fm_port_tx_init; sc->sc_start_locked = dtsec_rm_if_start_locked; } else { sc->sc_port_rx_init = dtsec_im_fm_port_rx_init; sc->sc_port_tx_init = dtsec_im_fm_port_tx_init; sc->sc_start_locked = dtsec_im_if_start_locked; } device_printf(sc->sc_dev, "Configured for %s mode.\n", (sc->sc_mode == DTSEC_MODE_REGULAR) ? "regular" : "independent"); } int dtsec_attach(device_t dev) { struct dtsec_softc *sc; int error; struct ifnet *ifp; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_mac_mdio_irq = NO_IRQ; sc->sc_eth_id = device_get_unit(dev); /* Check if MallocSmart allocator is ready */ if (XX_MallocSmartInit() != E_OK) return (ENXIO); /* Init locks */ mtx_init(&sc->sc_lock, device_get_nameunit(dev), "DTSEC Global Lock", MTX_DEF); mtx_init(&sc->sc_mii_lock, device_get_nameunit(dev), "DTSEC MII Lock", MTX_DEF); /* Init callouts */ callout_init(&sc->sc_tick_callout, CALLOUT_MPSAFE); /* Read configuration */ if ((error = fman_get_handle(&sc->sc_fmh)) != 0) return (error); if ((error = fman_get_muram_handle(&sc->sc_muramh)) != 0) return (error); if ((error = fman_get_bushandle(&sc->sc_fm_base)) != 0) return (error); /* Configure working mode */ dtsec_configure_mode(sc); /* If we are working in regular mode configure BMAN and QMAN */ if (sc->sc_mode == DTSEC_MODE_REGULAR) { /* Create RX buffer pool */ error = dtsec_rm_pool_rx_init(sc); if (error != 0) return (EIO); /* Create RX frame queue range */ error = dtsec_rm_fqr_rx_init(sc); if (error != 0) return (EIO); /* Create frame info pool */ error = dtsec_rm_fi_pool_init(sc); if (error != 0) return (EIO); /* Create TX frame queue range */ error = dtsec_rm_fqr_tx_init(sc); if (error != 0) return (EIO); } /* Init FMan MAC module.
*/ error = dtsec_fm_mac_init(sc, sc->sc_mac_addr); if (error != 0) { dtsec_detach(dev); return (ENXIO); } /* Init FMan TX port */ error = sc->sc_port_tx_init(sc, device_get_unit(sc->sc_dev)); if (error != 0) { dtsec_detach(dev); return (ENXIO); } /* Init FMan RX port */ error = sc->sc_port_rx_init(sc, device_get_unit(sc->sc_dev)); if (error != 0) { dtsec_detach(dev); return (ENXIO); } /* Create network interface for upper layers */ ifp = sc->sc_ifnet = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->sc_dev, "if_alloc() failed.\n"); dtsec_detach(dev); return (ENOMEM); } ifp->if_softc = sc; ifp->if_mtu = ETHERMTU; /* TODO: Configure */ ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST; ifp->if_init = dtsec_if_init; ifp->if_start = dtsec_if_start; ifp->if_ioctl = dtsec_if_ioctl; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; if (sc->sc_phy_addr >= 0) if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); else if_initname(ifp, "dtsec_phy", device_get_unit(sc->sc_dev)); /* TODO */ #if 0 IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1); ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1; IFQ_SET_READY(&ifp->if_snd); #endif ifp->if_capabilities = 0; /* TODO: Check */ ifp->if_capenable = ifp->if_capabilities; /* Attach PHY(s) */ error = mii_attach(sc->sc_dev, &sc->sc_mii_dev, ifp, dtsec_ifmedia_upd, dtsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->sc_phy_addr, MII_OFFSET_ANY, 0); if (error) { device_printf(sc->sc_dev, "attaching PHYs failed: %d\n", error); dtsec_detach(sc->sc_dev); return (error); } sc->sc_mii = device_get_softc(sc->sc_mii_dev); /* Attach to stack */ ether_ifattach(ifp, sc->sc_mac_addr); return (0); } int dtsec_detach(device_t dev) { struct dtsec_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->sc_ifnet; if (device_is_attached(dev)) { ether_ifdetach(ifp); /* Shutdown interface */ DTSEC_LOCK(sc); dtsec_if_deinit_locked(sc); DTSEC_UNLOCK(sc); } if (sc->sc_ifnet) { if_free(sc->sc_ifnet); sc->sc_ifnet = NULL; } if (sc->sc_mode == DTSEC_MODE_REGULAR) { /* Free RX/TX FQRs */ dtsec_rm_fqr_rx_free(sc); dtsec_rm_fqr_tx_free(sc); /* Free frame info pool */ dtsec_rm_fi_pool_free(sc); /* Free RX buffer pool */ dtsec_rm_pool_rx_free(sc); } dtsec_fm_mac_free(sc); dtsec_fm_port_free_both(sc); /* Destroy lock */ mtx_destroy(&sc->sc_lock); return (0); } int dtsec_suspend(device_t dev) { return (0); } int dtsec_resume(device_t dev) { return (0); } int dtsec_shutdown(device_t dev) { return (0); } /** @} */ /** * @group MII bus interface. 
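 * (Editor's note: the methods below do not touch the PHY registers
 * themselves; they forward to the MDIO device resolved by find_mdio()
 * through the MIIBUS_READREG()/MIIBUS_WRITEREG() kobj calls, which lets
 * several MACs share a single MDIO controller.)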
* @{ */ int dtsec_miibus_readreg(device_t dev, int phy, int reg) { struct dtsec_softc *sc; sc = device_get_softc(dev); return (MIIBUS_READREG(sc->sc_mdio, phy, reg)); } int dtsec_miibus_writereg(device_t dev, int phy, int reg, int value) { struct dtsec_softc *sc; sc = device_get_softc(dev); return (MIIBUS_WRITEREG(sc->sc_mdio, phy, reg, value)); } void dtsec_miibus_statchg(device_t dev) { struct dtsec_softc *sc; e_EnetSpeed speed; bool duplex; int error; sc = device_get_softc(dev); DTSEC_LOCK_ASSERT(sc); duplex = ((sc->sc_mii->mii_media_active & IFM_GMASK) == IFM_FDX); switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { case IFM_1000_T: case IFM_1000_SX: speed = e_ENET_SPEED_1000; break; case IFM_100_TX: speed = e_ENET_SPEED_100; break; case IFM_10_T: speed = e_ENET_SPEED_10; break; default: speed = e_ENET_SPEED_10; } error = FM_MAC_AdjustLink(sc->sc_mach, speed, duplex); if (error != E_OK) device_printf(sc->sc_dev, "error while adjusting MAC speed.\n"); } /** @} */ Index: projects/runtime-coverage/sys/dev/dpaa/if_dtsec.h =================================================================== --- projects/runtime-coverage/sys/dev/dpaa/if_dtsec.h (revision 325209) +++ projects/runtime-coverage/sys/dev/dpaa/if_dtsec.h (revision 325210) @@ -1,155 +1,156 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef IF_DTSEC_H_ #define IF_DTSEC_H_ /** * @group dTSEC common API. * @{ */ #define DTSEC_MODE_REGULAR 0 #define DTSEC_MODE_INDEPENDENT 1 #define DTSEC_LOCK(sc) mtx_lock(&(sc)->sc_lock) #define DTSEC_UNLOCK(sc) mtx_unlock(&(sc)->sc_lock) #define DTSEC_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_lock, MA_OWNED) #define DTSEC_MII_LOCK(sc) mtx_lock(&(sc)->sc_mii_lock) #define DTSEC_MII_UNLOCK(sc) mtx_unlock(&(sc)->sc_mii_lock) enum eth_dev_type { ETH_DTSEC = 0x1, ETH_10GSEC = 0x2 }; struct dtsec_softc { /* XXX MII bus requires that struct ifnet is first!!! 
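 * (Editor's note: miibus and several PHY drivers historically treat the
 * parent softc as if it began with the ifnet pointer, so sc_ifnet must
 * stay the first member of dtsec_softc; reordering this struct would
 * break them silently.)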
*/ struct ifnet *sc_ifnet; device_t sc_dev; + struct resource *sc_mem; struct mtx sc_lock; int sc_mode; /* Methods */ int (*sc_port_rx_init) (struct dtsec_softc *sc, int unit); int (*sc_port_tx_init) (struct dtsec_softc *sc, int unit); void (*sc_start_locked) (struct dtsec_softc *sc); /* dTSEC data */ enum eth_dev_type sc_eth_dev_type; uint8_t sc_eth_id; uintptr_t sc_mac_mem_offset; e_EnetMode sc_mac_enet_mode; int sc_mac_mdio_irq; uint8_t sc_mac_addr[6]; int sc_port_rx_hw_id; int sc_port_tx_hw_id; uint32_t sc_port_tx_qman_chan; int sc_phy_addr; bool sc_hidden; device_t sc_mdio; /* Params from fman_bus driver */ vm_offset_t sc_fm_base; t_Handle sc_fmh; t_Handle sc_muramh; t_Handle sc_mach; t_Handle sc_rxph; t_Handle sc_txph; /* MII data */ struct mii_data *sc_mii; device_t sc_mii_dev; struct mtx sc_mii_lock; struct callout sc_tick_callout; /* RX Pool */ t_Handle sc_rx_pool; uint8_t sc_rx_bpid; uma_zone_t sc_rx_zone; char sc_rx_zname[64]; /* RX Frame Queue */ t_Handle sc_rx_fqr; uint32_t sc_rx_fqid; /* TX Frame Queue */ t_Handle sc_tx_fqr; bool sc_tx_fqr_full; t_Handle sc_tx_conf_fqr; uint32_t sc_tx_conf_fqid; /* Frame Info Zone */ uma_zone_t sc_fi_zone; char sc_fi_zname[64]; }; /** @} */ /** * @group dTSEC FMan PORT API. * @{ */ enum dtsec_fm_port_params { FM_PORT_LIODN_BASE = 0, FM_PORT_LIODN_OFFSET = 0, FM_PORT_MEM_ID = 0, FM_PORT_MEM_ATTR = MEMORY_ATTR_CACHEABLE, FM_PORT_BUFFER_SIZE = MCLBYTES, }; e_FmPortType dtsec_fm_port_rx_type(enum eth_dev_type type); void dtsec_fm_port_rx_exception_callback(t_Handle app, e_FmPortExceptions exception); void dtsec_fm_port_tx_exception_callback(t_Handle app, e_FmPortExceptions exception); e_FmPortType dtsec_fm_port_tx_type(enum eth_dev_type type); /** @} */ /** * @group dTSEC bus interface. * @{ */ int dtsec_attach(device_t dev); int dtsec_detach(device_t dev); int dtsec_suspend(device_t dev); int dtsec_resume(device_t dev); int dtsec_shutdown(device_t dev); int dtsec_miibus_readreg(device_t dev, int phy, int reg); int dtsec_miibus_writereg(device_t dev, int phy, int reg, int value); void dtsec_miibus_statchg(device_t dev); /** @} */ #endif /* IF_DTSEC_H_ */ Index: projects/runtime-coverage/sys/dev/dpaa/if_dtsec_fdt.c =================================================================== --- projects/runtime-coverage/sys/dev/dpaa/if_dtsec_fdt.c (revision 325209) +++ projects/runtime-coverage/sys/dev/dpaa/if_dtsec_fdt.c (revision 325210) @@ -1,230 +1,235 @@ /*- * Copyright (c) 2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include +#include + #include #include #include #include #include #include #include #include #include "miibus_if.h" #include #include #include "if_dtsec.h" #include "fman.h" static int dtsec_fdt_probe(device_t dev); static int dtsec_fdt_attach(device_t dev); static device_method_t dtsec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dtsec_fdt_probe), DEVMETHOD(device_attach, dtsec_fdt_attach), DEVMETHOD(device_detach, dtsec_detach), DEVMETHOD(device_shutdown, dtsec_shutdown), DEVMETHOD(device_suspend, dtsec_suspend), DEVMETHOD(device_resume, dtsec_resume), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, dtsec_miibus_readreg), DEVMETHOD(miibus_writereg, dtsec_miibus_writereg), DEVMETHOD(miibus_statchg, dtsec_miibus_statchg), { 0, 0 } }; static driver_t dtsec_driver = { "dtsec", dtsec_methods, sizeof(struct dtsec_softc), }; static devclass_t dtsec_devclass; DRIVER_MODULE(dtsec, fman, dtsec_driver, dtsec_devclass, 0, 0); DRIVER_MODULE(miibus, dtsec, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(dtsec, ether, 1, 1, 1); MODULE_DEPEND(dtsec, miibus, 1, 1, 1); static int dtsec_fdt_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "fsl,fman-dtsec") && !ofw_bus_is_compatible(dev, "fsl,fman-xgec")) return (ENXIO); device_set_desc(dev, "Freescale Data Path Triple Speed Ethernet " "Controller"); return (BUS_PROBE_DEFAULT); } static int find_mdio(phandle_t phy_node, device_t mac, device_t *mdio_dev) { device_t bus; while (phy_node > 0) { if (ofw_bus_node_is_compatible(phy_node, "fsl,fman-mdio")) break; phy_node = OF_parent(phy_node); } if (phy_node <= 0) return (ENOENT); bus = device_get_parent(mac); *mdio_dev = ofw_bus_find_child_device_by_phandle(bus, phy_node); return (0); } static int dtsec_fdt_attach(device_t dev) { struct dtsec_softc *sc; phandle_t enet_node, phy_node; phandle_t fman_rxtx_node[2]; char phy_type[6]; pcell_t fman_tx_cell; + int rid; sc = device_get_softc(dev); enet_node = ofw_bus_get_node(dev); if (OF_getprop(enet_node, "local-mac-address", (void *)sc->sc_mac_addr, 6) == -1) { device_printf(dev, "Could not load local-mac-addr property from DTS\n"); return (ENXIO); } /* Get link speed */ if (ofw_bus_is_compatible(dev, "fsl,fman-dtsec") != 0) sc->sc_eth_dev_type = ETH_DTSEC; else if (ofw_bus_is_compatible(dev, "fsl,fman-xgec") != 0) sc->sc_eth_dev_type = ETH_10GSEC; else return(ENXIO); /* Get MAC memory offset in SoC */ - if (OF_getprop(enet_node, "reg", (void *)&sc->sc_mac_mem_offset, - sizeof(sc->sc_mac_mem_offset)) <= 0) + rid = 0; + sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (sc->sc_mem == NULL) return (ENXIO); /* Get PHY address */ if (OF_getprop(enet_node, "phy-handle", (void *)&phy_node, sizeof(phy_node)) <= 0) return (ENXIO); phy_node = OF_instance_to_package(phy_node); if (OF_getprop(phy_node, 
"reg", (void *)&sc->sc_phy_addr, sizeof(sc->sc_phy_addr)) <= 0) return (ENXIO); if (find_mdio(phy_node, dev, &sc->sc_mdio) != 0) return (ENXIO); /* Get PHY connection type */ if (OF_getprop(enet_node, "phy-connection-type", (void *)phy_type, sizeof(phy_type)) <= 0) return (ENXIO); if (!strcmp(phy_type, "sgmii")) sc->sc_mac_enet_mode = e_ENET_MODE_SGMII_1000; else if (!strcmp(phy_type, "rgmii")) sc->sc_mac_enet_mode = e_ENET_MODE_RGMII_1000; else if (!strcmp(phy_type, "xgmii")) /* We set 10 Gigabit mode flag however we don't support it */ sc->sc_mac_enet_mode = e_ENET_MODE_XGMII_10000; else return (ENXIO); /* Get RX/TX port handles */ if (OF_getprop(enet_node, "fsl,fman-ports", (void *)fman_rxtx_node, sizeof(fman_rxtx_node)) <= 0) return (ENXIO); if (fman_rxtx_node[0] == 0) return (ENXIO); if (fman_rxtx_node[1] == 0) return (ENXIO); fman_rxtx_node[0] = OF_instance_to_package(fman_rxtx_node[0]); fman_rxtx_node[1] = OF_instance_to_package(fman_rxtx_node[1]); if (ofw_bus_node_is_compatible(fman_rxtx_node[0], "fsl,fman-v2-port-rx") == 0) return (ENXIO); if (ofw_bus_node_is_compatible(fman_rxtx_node[1], "fsl,fman-v2-port-tx") == 0) return (ENXIO); /* Get RX port HW id */ if (OF_getprop(fman_rxtx_node[0], "reg", (void *)&sc->sc_port_rx_hw_id, sizeof(sc->sc_port_rx_hw_id)) <= 0) return (ENXIO); /* Get TX port HW id */ if (OF_getprop(fman_rxtx_node[1], "reg", (void *)&sc->sc_port_tx_hw_id, sizeof(sc->sc_port_tx_hw_id)) <= 0) return (ENXIO); if (OF_getprop(fman_rxtx_node[1], "cell-index", &fman_tx_cell, sizeof(fman_tx_cell)) <= 0) return (ENXIO); /* Get QMan channel */ sc->sc_port_tx_qman_chan = fman_qman_channel_id(device_get_parent(dev), fman_tx_cell); return (dtsec_attach(dev)); } Index: projects/runtime-coverage/sys/dev/dpaa/if_dtsec_rm.c =================================================================== --- projects/runtime-coverage/sys/dev/dpaa/if_dtsec_rm.c (revision 325209) +++ projects/runtime-coverage/sys/dev/dpaa/if_dtsec_rm.c (revision 325210) @@ -1,658 +1,658 @@ /*- * Copyright (c) 2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miibus_if.h" #include #include #include #include #include #include "fman.h" #include "bman.h" #include "qman.h" #include "if_dtsec.h" #include "if_dtsec_rm.h" /** * @group dTSEC RM private defines. * @{ */ #define DTSEC_BPOOLS_USED (1) #define DTSEC_MAX_TX_QUEUE_LEN 256 struct dtsec_rm_frame_info { struct mbuf *fi_mbuf; t_DpaaSGTE fi_sgt[DPAA_NUM_OF_SG_TABLE_ENTRY]; }; enum dtsec_rm_pool_params { DTSEC_RM_POOL_RX_LOW_MARK = 16, DTSEC_RM_POOL_RX_HIGH_MARK = 64, DTSEC_RM_POOL_RX_MAX_SIZE = 256, DTSEC_RM_POOL_FI_LOW_MARK = 16, DTSEC_RM_POOL_FI_HIGH_MARK = 64, DTSEC_RM_POOL_FI_MAX_SIZE = 256, }; enum dtsec_rm_fqr_params { DTSEC_RM_FQR_RX_CHANNEL = e_QM_FQ_CHANNEL_POOL1, DTSEC_RM_FQR_RX_WQ = 1, DTSEC_RM_FQR_TX_CONF_CHANNEL = e_QM_FQ_CHANNEL_SWPORTAL0, DTSEC_RM_FQR_TX_WQ = 1, DTSEC_RM_FQR_TX_CONF_WQ = 1 }; /** @} */ /** * @group dTSEC Frame Info routines. * @{ */ void dtsec_rm_fi_pool_free(struct dtsec_softc *sc) { if (sc->sc_fi_zone != NULL) uma_zdestroy(sc->sc_fi_zone); } int dtsec_rm_fi_pool_init(struct dtsec_softc *sc) { snprintf(sc->sc_fi_zname, sizeof(sc->sc_fi_zname), "%s: Frame Info", device_get_nameunit(sc->sc_dev)); sc->sc_fi_zone = uma_zcreate(sc->sc_fi_zname, sizeof(struct dtsec_rm_frame_info), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); if (sc->sc_fi_zone == NULL) return (EIO); return (0); } static struct dtsec_rm_frame_info * dtsec_rm_fi_alloc(struct dtsec_softc *sc) { struct dtsec_rm_frame_info *fi; fi = uma_zalloc(sc->sc_fi_zone, M_NOWAIT); return (fi); } static void dtsec_rm_fi_free(struct dtsec_softc *sc, struct dtsec_rm_frame_info *fi) { uma_zfree(sc->sc_fi_zone, fi); } /** @} */ /** * @group dTSEC FMan PORT routines. 
* @{ */ int dtsec_rm_fm_port_rx_init(struct dtsec_softc *sc, int unit) { t_FmPortParams params; t_FmPortRxParams *rx_params; t_FmExtPools *pool_params; t_Error error; memset(&params, 0, sizeof(params)); params.baseAddr = sc->sc_fm_base + sc->sc_port_rx_hw_id; params.h_Fm = sc->sc_fmh; params.portType = dtsec_fm_port_rx_type(sc->sc_eth_dev_type); params.portId = sc->sc_eth_id; - params.independentModeEnable = FALSE; + params.independentModeEnable = false; params.liodnBase = FM_PORT_LIODN_BASE; params.f_Exception = dtsec_fm_port_rx_exception_callback; params.h_App = sc; rx_params = &params.specificParams.rxParams; rx_params->errFqid = sc->sc_rx_fqid; rx_params->dfltFqid = sc->sc_rx_fqid; rx_params->liodnOffset = 0; pool_params = &rx_params->extBufPools; pool_params->numOfPoolsUsed = DTSEC_BPOOLS_USED; pool_params->extBufPool->id = sc->sc_rx_bpid; pool_params->extBufPool->size = FM_PORT_BUFFER_SIZE; sc->sc_rxph = FM_PORT_Config(&params); if (sc->sc_rxph == NULL) { device_printf(sc->sc_dev, "couldn't configure FM Port RX.\n"); return (ENXIO); } error = FM_PORT_Init(sc->sc_rxph); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't initialize FM Port RX.\n"); FM_PORT_Free(sc->sc_rxph); return (ENXIO); } if (bootverbose) device_printf(sc->sc_dev, "RX hw port 0x%02x initialized.\n", sc->sc_port_rx_hw_id); return (0); } int dtsec_rm_fm_port_tx_init(struct dtsec_softc *sc, int unit) { t_FmPortParams params; t_FmPortNonRxParams *tx_params; t_Error error; memset(&params, 0, sizeof(params)); params.baseAddr = sc->sc_fm_base + sc->sc_port_tx_hw_id; params.h_Fm = sc->sc_fmh; params.portType = dtsec_fm_port_tx_type(sc->sc_eth_dev_type); params.portId = sc->sc_eth_id; - params.independentModeEnable = FALSE; + params.independentModeEnable = false; params.liodnBase = FM_PORT_LIODN_BASE; params.f_Exception = dtsec_fm_port_tx_exception_callback; params.h_App = sc; tx_params = &params.specificParams.nonRxParams; tx_params->errFqid = sc->sc_tx_conf_fqid; tx_params->dfltFqid = sc->sc_tx_conf_fqid; tx_params->qmChannel = sc->sc_port_tx_qman_chan; #ifdef FM_OP_PARTITION_ERRATA_FMANx8 tx_params->opLiodnOffset = 0; #endif sc->sc_txph = FM_PORT_Config(&params); if (sc->sc_txph == NULL) { device_printf(sc->sc_dev, "couldn't configure FM Port TX.\n"); return (ENXIO); } error = FM_PORT_Init(sc->sc_txph); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't initialize FM Port TX.\n"); FM_PORT_Free(sc->sc_txph); return (ENXIO); } if (bootverbose) device_printf(sc->sc_dev, "TX hw port 0x%02x initialized.\n", sc->sc_port_tx_hw_id); return (0); } /** @} */ /** * @group dTSEC buffer pools routines.
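 * (Editor's note: the RX pool in this group is glued to UMA; the
 * bman_pool_create() call passes dtsec_rm_pool_rx_get_buffer() and
 * dtsec_rm_pool_rx_put_buffer(), which wrap uma_zalloc(9)/uma_zfree(9)
 * on sc_rx_zone, and the depletion callback refills the hardware pool
 * up to DTSEC_RM_POOL_RX_HIGH_MARK.)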
* @{ */ static t_Error dtsec_rm_pool_rx_put_buffer(t_Handle h_BufferPool, uint8_t *buffer, t_Handle context) { struct dtsec_softc *sc; sc = h_BufferPool; uma_zfree(sc->sc_rx_zone, buffer); return (E_OK); } static uint8_t * dtsec_rm_pool_rx_get_buffer(t_Handle h_BufferPool, t_Handle *context) { struct dtsec_softc *sc; uint8_t *buffer; sc = h_BufferPool; buffer = uma_zalloc(sc->sc_rx_zone, M_NOWAIT); return (buffer); } static void dtsec_rm_pool_rx_depleted(t_Handle h_App, bool in) { struct dtsec_softc *sc; unsigned int count; sc = h_App; if (!in) return; while (1) { count = bman_count(sc->sc_rx_pool); if (count > DTSEC_RM_POOL_RX_HIGH_MARK) return; bman_pool_fill(sc->sc_rx_pool, DTSEC_RM_POOL_RX_HIGH_MARK); } } void dtsec_rm_pool_rx_free(struct dtsec_softc *sc) { if (sc->sc_rx_pool != NULL) bman_pool_destroy(sc->sc_rx_pool); if (sc->sc_rx_zone != NULL) uma_zdestroy(sc->sc_rx_zone); } int dtsec_rm_pool_rx_init(struct dtsec_softc *sc) { /* FM_PORT_BUFFER_SIZE must be less than PAGE_SIZE */ CTASSERT(FM_PORT_BUFFER_SIZE < PAGE_SIZE); snprintf(sc->sc_rx_zname, sizeof(sc->sc_rx_zname), "%s: RX Buffers", device_get_nameunit(sc->sc_dev)); sc->sc_rx_zone = uma_zcreate(sc->sc_rx_zname, FM_PORT_BUFFER_SIZE, NULL, NULL, NULL, NULL, FM_PORT_BUFFER_SIZE - 1, 0); if (sc->sc_rx_zone == NULL) return (EIO); sc->sc_rx_pool = bman_pool_create(&sc->sc_rx_bpid, FM_PORT_BUFFER_SIZE, 0, 0, DTSEC_RM_POOL_RX_MAX_SIZE, dtsec_rm_pool_rx_get_buffer, dtsec_rm_pool_rx_put_buffer, DTSEC_RM_POOL_RX_LOW_MARK, DTSEC_RM_POOL_RX_HIGH_MARK, 0, 0, dtsec_rm_pool_rx_depleted, sc, NULL, NULL); if (sc->sc_rx_pool == NULL) { device_printf(sc->sc_dev, "NULL rx pool somehow\n"); dtsec_rm_pool_rx_free(sc); return (EIO); } return (0); } /** @} */ /** * @group dTSEC Frame Queue Range routines. * @{ */ static void dtsec_rm_fqr_mext_free(struct mbuf *m) { struct dtsec_softc *sc; void *buffer; buffer = m->m_ext.ext_arg1; sc = m->m_ext.ext_arg2; if (bman_count(sc->sc_rx_pool) <= DTSEC_RM_POOL_RX_MAX_SIZE) bman_put_buffer(sc->sc_rx_pool, buffer); else dtsec_rm_pool_rx_put_buffer(sc, buffer, NULL); } static e_RxStoreResponse dtsec_rm_fqr_rx_callback(t_Handle app, t_Handle fqr, t_Handle portal, uint32_t fqid_off, t_DpaaFD *frame) { struct dtsec_softc *sc; struct mbuf *m; m = NULL; sc = app; KASSERT(DPAA_FD_GET_FORMAT(frame) == e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF, ("%s(): Got unsupported frame format 0x%02X!", __func__, DPAA_FD_GET_FORMAT(frame))); KASSERT(DPAA_FD_GET_OFFSET(frame) == 0, ("%s(): Only offset 0 is supported!", __func__)); if (DPAA_FD_GET_STATUS(frame) != 0) { device_printf(sc->sc_dev, "RX error: 0x%08X\n", DPAA_FD_GET_STATUS(frame)); goto err; } m = m_gethdr(M_NOWAIT, MT_HEADER); if (m == NULL) goto err; m_extadd(m, DPAA_FD_GET_ADDR(frame), FM_PORT_BUFFER_SIZE, dtsec_rm_fqr_mext_free, DPAA_FD_GET_ADDR(frame), sc, 0, EXT_NET_DRV); m->m_pkthdr.rcvif = sc->sc_ifnet; m->m_len = DPAA_FD_GET_LENGTH(frame); m_fixhdr(m); (*sc->sc_ifnet->if_input)(sc->sc_ifnet, m); return (e_RX_STORE_RESPONSE_CONTINUE); err: bman_put_buffer(sc->sc_rx_pool, DPAA_FD_GET_ADDR(frame)); if (m != NULL) m_freem(m); return (e_RX_STORE_RESPONSE_CONTINUE); } static e_RxStoreResponse dtsec_rm_fqr_tx_confirm_callback(t_Handle app, t_Handle fqr, t_Handle portal, uint32_t fqid_off, t_DpaaFD *frame) { struct dtsec_rm_frame_info *fi; struct dtsec_softc *sc; unsigned int qlen; t_DpaaSGTE *sgt0; sc = app; if (DPAA_FD_GET_STATUS(frame) != 0) device_printf(sc->sc_dev, "TX error: 0x%08X\n", DPAA_FD_GET_STATUS(frame)); /* * We are storing struct dtsec_rm_frame_info in first entry * 
of scatter-gather table. */ sgt0 = DPAA_FD_GET_ADDR(frame); fi = DPAA_SGTE_GET_ADDR(sgt0); /* Free transmitted frame */ m_freem(fi->fi_mbuf); dtsec_rm_fi_free(sc, fi); qlen = qman_fqr_get_counter(sc->sc_tx_conf_fqr, 0, e_QM_FQR_COUNTERS_FRAME); if (qlen == 0) { DTSEC_LOCK(sc); if (sc->sc_tx_fqr_full) { sc->sc_tx_fqr_full = 0; dtsec_rm_if_start_locked(sc); } DTSEC_UNLOCK(sc); } return (e_RX_STORE_RESPONSE_CONTINUE); } void dtsec_rm_fqr_rx_free(struct dtsec_softc *sc) { if (sc->sc_rx_fqr) qman_fqr_free(sc->sc_rx_fqr); } int dtsec_rm_fqr_rx_init(struct dtsec_softc *sc) { t_Error error; t_Handle fqr; /* Default Frame Queue */ fqr = qman_fqr_create(1, DTSEC_RM_FQR_RX_CHANNEL, DTSEC_RM_FQR_RX_WQ, - FALSE, 0, FALSE, FALSE, TRUE, FALSE, 0, 0, 0); + false, 0, false, false, true, false, 0, 0, 0); if (fqr == NULL) { device_printf(sc->sc_dev, "could not create default RX queue" "\n"); return (EIO); } sc->sc_rx_fqr = fqr; sc->sc_rx_fqid = qman_fqr_get_base_fqid(fqr); error = qman_fqr_register_cb(fqr, dtsec_rm_fqr_rx_callback, sc); if (error != E_OK) { device_printf(sc->sc_dev, "could not register RX callback\n"); dtsec_rm_fqr_rx_free(sc); return (EIO); } return (0); } void dtsec_rm_fqr_tx_free(struct dtsec_softc *sc) { if (sc->sc_tx_fqr) qman_fqr_free(sc->sc_tx_fqr); if (sc->sc_tx_conf_fqr) qman_fqr_free(sc->sc_tx_conf_fqr); } int dtsec_rm_fqr_tx_init(struct dtsec_softc *sc) { t_Error error; t_Handle fqr; /* TX Frame Queue */ fqr = qman_fqr_create(1, sc->sc_port_tx_qman_chan, - DTSEC_RM_FQR_TX_WQ, FALSE, 0, FALSE, FALSE, TRUE, FALSE, 0, 0, 0); + DTSEC_RM_FQR_TX_WQ, false, 0, false, false, true, false, 0, 0, 0); if (fqr == NULL) { device_printf(sc->sc_dev, "could not create default TX queue" "\n"); return (EIO); } sc->sc_tx_fqr = fqr; /* TX Confirmation Frame Queue */ fqr = qman_fqr_create(1, DTSEC_RM_FQR_TX_CONF_CHANNEL, - DTSEC_RM_FQR_TX_CONF_WQ, FALSE, 0, FALSE, FALSE, TRUE, FALSE, 0, 0, + DTSEC_RM_FQR_TX_CONF_WQ, false, 0, false, false, true, false, 0, 0, 0); if (fqr == NULL) { device_printf(sc->sc_dev, "could not create TX confirmation " "queue\n"); dtsec_rm_fqr_tx_free(sc); return (EIO); } sc->sc_tx_conf_fqr = fqr; sc->sc_tx_conf_fqid = qman_fqr_get_base_fqid(fqr); error = qman_fqr_register_cb(fqr, dtsec_rm_fqr_tx_confirm_callback, sc); if (error != E_OK) { device_printf(sc->sc_dev, "could not register TX confirmation " "callback\n"); dtsec_rm_fqr_tx_free(sc); return (EIO); } return (0); } /** @} */ /** * @group dTSEC IFnet routines. * @{ */ void dtsec_rm_if_start_locked(struct dtsec_softc *sc) { vm_size_t dsize, psize, ssize; struct dtsec_rm_frame_info *fi; unsigned int qlen, i; struct mbuf *m0, *m; vm_offset_t vaddr; vm_paddr_t paddr; t_DpaaFD fd; DTSEC_LOCK_ASSERT(sc); /* TODO: IFF_DRV_OACTIVE */ if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0) return; if ((sc->sc_ifnet->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING) return; while (!IFQ_DRV_IS_EMPTY(&sc->sc_ifnet->if_snd)) { /* Check length of the TX queue */ qlen = qman_fqr_get_counter(sc->sc_tx_fqr, 0, e_QM_FQR_COUNTERS_FRAME); if (qlen >= DTSEC_MAX_TX_QUEUE_LEN) { sc->sc_tx_fqr_full = 1; return; } fi = dtsec_rm_fi_alloc(sc); if (fi == NULL) return; IFQ_DRV_DEQUEUE(&sc->sc_ifnet->if_snd, m0); if (m0 == NULL) { dtsec_rm_fi_free(sc, fi); return; } i = 0; m = m0; psize = 0; dsize = 0; fi->fi_mbuf = m0; while (m && i < DPAA_NUM_OF_SG_TABLE_ENTRY) { if (m->m_len == 0) continue; /* * First entry in scatter-gather table is used to keep * pointer to frame info structure. 
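 * The intended layout of the finished table is thus: fi_sgt[0] carries the
 * frame-info pointer with length 0, the entries after it carry the mbuf data
 * split at page boundaries, and the last data entry has its FINAL bit set
 * just before the frame descriptor is enqueued.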
*/ DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i], (void *)fi); DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0); i++; dsize = m->m_len; vaddr = (vm_offset_t)m->m_data; while (dsize > 0 && i < DPAA_NUM_OF_SG_TABLE_ENTRY) { paddr = XX_VirtToPhys((void *)vaddr); ssize = PAGE_SIZE - (paddr & PAGE_MASK); if (m->m_len < ssize) ssize = m->m_len; DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i], (void *)vaddr); DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], ssize); DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0); dsize -= ssize; vaddr += ssize; psize += ssize; i++; } if (dsize > 0) break; m = m->m_next; } /* Check if SG table was constructed properly */ if (m != NULL || dsize != 0) { dtsec_rm_fi_free(sc, fi); m_freem(m0); continue; } DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i-1], 1); DPAA_FD_SET_ADDR(&fd, fi->fi_sgt); DPAA_FD_SET_LENGTH(&fd, psize); DPAA_FD_SET_FORMAT(&fd, e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF); fd.liodn = 0; fd.bpid = 0; fd.elion = 0; DPAA_FD_SET_OFFSET(&fd, 0); DPAA_FD_SET_STATUS(&fd, 0); DTSEC_UNLOCK(sc); if (qman_fqr_enqueue(sc->sc_tx_fqr, 0, &fd) != E_OK) { dtsec_rm_fi_free(sc, fi); m_freem(m0); } DTSEC_LOCK(sc); } } /** @} */ Index: projects/runtime-coverage/sys/net/iflib.c =================================================================== --- projects/runtime-coverage/sys/net/iflib.c (revision 325209) +++ projects/runtime-coverage/sys/net/iflib.c (revision 325210) @@ -1,5707 +1,5707 @@ /*- * Copyright (c) 2014-2017, Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Neither the name of Matthew Macy nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #if defined(__i386__) || defined(__amd64__) #include #include #include #include #include #include #endif #include /* * enable accounting of every mbuf as it comes in to and goes out of * iflib's software descriptor references */ #define MEMORY_LOGGING 0 /* * Enable mbuf vectors for compressing long mbuf chains */ /* * NB: * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead * we prefetch needs to be determined by the time spent in m_free vis a vis * the cost of a prefetch. This will of course vary based on the workload: * - NFLX's m_free path is dominated by vm-based M_EXT manipulation which * is quite expensive, thus suggesting very little prefetch. * - small packet forwarding which is just returning a single mbuf to * UMA will typically be very fast vis a vis the cost of a memory * access. */ /* * File organization: * - private structures * - iflib private utility functions * - ifnet functions * - vlan registry and other exported functions * - iflib public core functions * * */ static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library"); struct iflib_txq; typedef struct iflib_txq *iflib_txq_t; struct iflib_rxq; typedef struct iflib_rxq *iflib_rxq_t; struct iflib_fl; typedef struct iflib_fl *iflib_fl_t; struct iflib_ctx; static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid); typedef struct iflib_filter_info { driver_filter_t *ifi_filter; void *ifi_filter_arg; struct grouptask *ifi_task; void *ifi_ctx; } *iflib_filter_info_t; struct iflib_ctx { KOBJ_FIELDS; /* * Pointer to hardware driver's softc */ void *ifc_softc; device_t ifc_dev; if_t ifc_ifp; cpuset_t ifc_cpus; if_shared_ctx_t ifc_sctx; struct if_softc_ctx ifc_softc_ctx; struct mtx ifc_mtx; uint16_t ifc_nhwtxqs; uint16_t ifc_nhwrxqs; iflib_txq_t ifc_txqs; iflib_rxq_t ifc_rxqs; uint32_t ifc_if_flags; uint32_t ifc_flags; uint32_t ifc_max_fl_buf_size; int ifc_in_detach; int ifc_link_state; int ifc_link_irq; int ifc_watchdog_events; struct cdev *ifc_led_dev; struct resource *ifc_msix_mem; struct if_irq ifc_legacy_irq; struct grouptask ifc_admin_task; struct grouptask ifc_vflr_task; struct iflib_filter_info ifc_filter_info; struct ifmedia ifc_media; struct sysctl_oid *ifc_sysctl_node; uint16_t ifc_sysctl_ntxqs; uint16_t ifc_sysctl_nrxqs; uint16_t ifc_sysctl_qs_eq_override; uint16_t ifc_sysctl_rx_budget; qidx_t ifc_sysctl_ntxds[8]; qidx_t ifc_sysctl_nrxds[8]; struct if_txrx ifc_txrx; #define isc_txd_encap ifc_txrx.ift_txd_encap #define isc_txd_flush ifc_txrx.ift_txd_flush #define isc_txd_credits_update ifc_txrx.ift_txd_credits_update #define isc_rxd_available ifc_txrx.ift_rxd_available #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_flush ifc_txrx.ift_rxd_flush #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_legacy_intr ifc_txrx.ift_legacy_intr eventhandler_tag ifc_vlan_attach_event; eventhandler_tag ifc_vlan_detach_event; uint8_t ifc_mac[ETHER_ADDR_LEN]; char ifc_mtx_name[16]; }; void * 
iflib_get_softc(if_ctx_t ctx) { return (ctx->ifc_softc); } device_t iflib_get_dev(if_ctx_t ctx) { return (ctx->ifc_dev); } if_t iflib_get_ifp(if_ctx_t ctx) { return (ctx->ifc_ifp); } struct ifmedia * iflib_get_media(if_ctx_t ctx) { return (&ctx->ifc_media); } void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]) { bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN); } if_softc_ctx_t iflib_get_softc_ctx(if_ctx_t ctx) { return (&ctx->ifc_softc_ctx); } if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx) { return (ctx->ifc_sctx); } #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2) #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*)) #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & (CACHE_LINE_SIZE-1))) #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP) #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF) #define RX_SW_DESC_MAP_CREATED (1 << 0) #define TX_SW_DESC_MAP_CREATED (1 << 1) #define RX_SW_DESC_INUSE (1 << 3) #define TX_SW_DESC_MAPPED (1 << 4) #define M_TOOBIG M_PROTO1 typedef struct iflib_sw_rx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ caddr_t *ifsd_cl; /* direct cluster pointer for rx */ uint8_t *ifsd_flags; } iflib_rxsd_array_t; typedef struct iflib_sw_tx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ uint8_t *ifsd_flags; } if_txsd_vec_t; /* magic number that should be high enough for any hardware */ #define IFLIB_MAX_TX_SEGS 128 /* bnxt supports 64 with hardware LRO enabled */ #define IFLIB_MAX_RX_SEGS 64 #define IFLIB_RX_COPY_THRESH 128 #define IFLIB_MAX_RX_REFRESH 32 /* The minimum descriptors per second before we start coalescing */ #define IFLIB_MIN_DESC_SEC 16384 #define IFLIB_DEFAULT_TX_UPDATE_FREQ 16 #define IFLIB_QUEUE_IDLE 0 #define IFLIB_QUEUE_HUNG 1 #define IFLIB_QUEUE_WORKING 2 /* maximum number of txqs that can share an rx interrupt */ #define IFLIB_MAX_TX_SHARED_INTR 4 /* this should really scale with ring size - this is a fairly arbitrary value */ #define TX_BATCH_SIZE 32 #define IFLIB_RESTART_BUDGET 8 #define IFC_LEGACY 0x001 #define IFC_QFLUSH 0x002 #define IFC_MULTISEG 0x004 #define IFC_DMAR 0x008 #define IFC_SC_ALLOCATED 0x010 #define IFC_INIT_DONE 0x020 #define IFC_PREFETCH 0x040 #define IFC_DO_RESET 0x080 #define IFC_CHECK_HUNG 0x100 #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) struct iflib_txq { qidx_t ift_in_use; qidx_t ift_cidx; qidx_t ift_cidx_processed; qidx_t ift_pidx; uint8_t ift_gen; uint8_t ift_br_offset; uint16_t ift_npending; uint16_t ift_db_pending; uint16_t ift_rs_pending; /* implicit pad */ uint8_t ift_txd_size[8]; uint64_t ift_processed; uint64_t ift_cleaned; uint64_t ift_cleaned_prev; #if MEMORY_LOGGING uint64_t ift_enqueued; uint64_t ift_dequeued; #endif uint64_t ift_no_tx_dma_setup; uint64_t ift_no_desc_avail; uint64_t ift_mbuf_defrag_failed; uint64_t ift_mbuf_defrag; uint64_t ift_map_failed; uint64_t ift_txd_encap_efbig; uint64_t ift_pullups; struct mtx ift_mtx; struct mtx ift_db_mtx; /* constant values */ if_ctx_t ift_ctx; struct ifmp_ring *ift_br; struct grouptask ift_task; qidx_t ift_size; uint16_t ift_id; struct callout ift_timer; if_txsd_vec_t ift_sds; uint8_t ift_qstatus; uint8_t ift_closed; uint8_t ift_update_freq; struct iflib_filter_info ift_filter_info; bus_dma_tag_t ift_desc_tag; bus_dma_tag_t ift_tso_desc_tag; iflib_dma_info_t ift_ifdi; #define 
MTX_NAME_LEN 16 char ift_mtx_name[MTX_NAME_LEN]; char ift_db_mtx_name[MTX_NAME_LEN]; bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ift_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); struct iflib_fl { qidx_t ifl_cidx; qidx_t ifl_pidx; qidx_t ifl_credits; uint8_t ifl_gen; uint8_t ifl_rxd_size; #if MEMORY_LOGGING uint64_t ifl_m_enqueued; uint64_t ifl_m_dequeued; uint64_t ifl_cl_enqueued; uint64_t ifl_cl_dequeued; #endif /* implicit pad */ bitstr_t *ifl_rx_bitmap; qidx_t ifl_fragidx; /* constant */ qidx_t ifl_size; uint16_t ifl_buf_size; uint16_t ifl_cltype; uma_zone_t ifl_zone; iflib_rxsd_array_t ifl_sds; iflib_rxq_t ifl_rxq; uint8_t ifl_id; bus_dma_tag_t ifl_desc_tag; iflib_dma_info_t ifl_ifdi; uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE); caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH]; qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH]; } __aligned(CACHE_LINE_SIZE); static inline qidx_t get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen) { qidx_t used; if (pidx > cidx) used = pidx - cidx; else if (pidx < cidx) used = size - cidx + pidx; else if (gen == 0 && pidx == cidx) used = 0; else if (gen == 1 && pidx == cidx) used = size; else panic("bad state"); return (used); } #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen)) #define IDXDIFF(head, tail, wrap) \ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) struct iflib_rxq { /* If there is a separate completion queue - * these are the cq cidx and pidx. Otherwise * these are unused. */ qidx_t ifr_size; qidx_t ifr_cq_cidx; qidx_t ifr_cq_pidx; uint8_t ifr_cq_gen; uint8_t ifr_fl_offset; if_ctx_t ifr_ctx; iflib_fl_t ifr_fl; uint64_t ifr_rx_irq; uint16_t ifr_id; uint8_t ifr_lro_enabled; uint8_t ifr_nfl; uint8_t ifr_ntxqirq; uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR]; struct lro_ctrl ifr_lc; struct grouptask ifr_task; struct iflib_filter_info ifr_filter_info; iflib_dma_info_t ifr_ifdi; /* dynamically allocate if any drivers need a value substantially larger than this */ struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ifr_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); typedef struct if_rxsd { caddr_t *ifsd_cl; struct mbuf **ifsd_m; iflib_fl_t ifsd_fl; qidx_t ifsd_cidx; } *if_rxsd_t; /* multiple of word size */ #ifdef __LP64__ #define PKT_INFO_SIZE 6 #define RXD_INFO_SIZE 5 #define PKT_TYPE uint64_t #else #define PKT_INFO_SIZE 11 #define RXD_INFO_SIZE 8 #define PKT_TYPE uint32_t #endif #define PKT_LOOP_BOUND ((PKT_INFO_SIZE/3)*3) #define RXD_LOOP_BOUND ((RXD_INFO_SIZE/4)*4) typedef struct if_pkt_info_pad { PKT_TYPE pkt_val[PKT_INFO_SIZE]; } *if_pkt_info_pad_t; typedef struct if_rxd_info_pad { PKT_TYPE rxd_val[RXD_INFO_SIZE]; } *if_rxd_info_pad_t; CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info)); CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info)); static inline void pkt_info_zero(if_pkt_info_t pi) { if_pkt_info_pad_t pi_pad; pi_pad = (if_pkt_info_pad_t)pi; pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0; pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0; #ifndef __LP64__ pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0; pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0; #endif } static inline void rxd_info_zero(if_rxd_info_t ri) { if_rxd_info_pad_t ri_pad; int i; ri_pad = (if_rxd_info_pad_t)ri; for (i = 0; i < 
RXD_LOOP_BOUND; i += 4) { ri_pad->rxd_val[i] = 0; ri_pad->rxd_val[i+1] = 0; ri_pad->rxd_val[i+2] = 0; ri_pad->rxd_val[i+3] = 0; } #ifdef __LP64__ ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0; #endif } /* * Only allow a single packet to take up at most 1/nth of the tx ring */ #define MAX_SINGLE_PACKET_FRACTION 12 #define IF_BAD_DMA (bus_addr_t)-1 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING)) #define CTX_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_mtx, _name, "iflib ctx lock", MTX_DEF) #define CTX_LOCK(ctx) mtx_lock(&(ctx)->ifc_mtx) #define CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_mtx) #define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_mtx) #define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx) #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx) /* Our boot-time initialization hook */ static int iflib_module_event_handler(module_t, int, void *); static moduledata_t iflib_moduledata = { "iflib", iflib_module_event_handler, NULL }; DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY); MODULE_VERSION(iflib, 1); MODULE_DEPEND(iflib, pci, 1, 1, 1); MODULE_DEPEND(iflib, ether, 1, 1, 1); TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1); TASKQGROUP_DEFINE(if_config_tqg, 1, 1); #ifndef IFLIB_DEBUG_COUNTERS #ifdef INVARIANTS #define IFLIB_DEBUG_COUNTERS 1 #else #define IFLIB_DEBUG_COUNTERS 0 #endif /* !INVARIANTS */ #endif static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0, "iflib driver parameters"); /* * XXX need to ensure that this can't accidentally cause the head to be moved backwards */ static int iflib_min_tx_latency = 0; SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW, &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput"); static int iflib_no_tx_batch = 0; SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW, &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput"); #if IFLIB_DEBUG_COUNTERS static int iflib_tx_seen; static int iflib_tx_sent; static int iflib_tx_encap; static int iflib_rx_allocs; static int iflib_fl_refills; static int iflib_fl_refills_large; static int iflib_tx_frees; SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0, "# tx mbufs seen"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0, "# tx mbufs sent"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0, "# tx mbufs encapped"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0, "# tx frees"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0, "# rx allocations"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0, "# refills"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD, &iflib_fl_refills_large, 0, "# large refills"); static int iflib_txq_drain_flushing; static int iflib_txq_drain_oactive; static int iflib_txq_drain_notready; static int iflib_txq_drain_encapfail; SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD, &iflib_txq_drain_flushing, 0, "# drain flushes"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD, &iflib_txq_drain_oactive, 0, "# drain oactives"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD, &iflib_txq_drain_notready, 0, "# drain notready"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD, &iflib_txq_drain_encapfail, 0, "# drain encap fails"); static int iflib_encap_load_mbuf_fail; static int iflib_encap_txq_avail_fail; static int iflib_encap_txd_encap_fail;
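/*
 * A worked example of the ring-occupancy arithmetic above, with
 * illustrative numbers only: for ift_size = 1024, cidx = 1000 and
 * pidx = 8, get_inuse() takes the pidx < cidx branch and reports
 * 1024 - 1000 + 8 = 32 descriptors in use, so TXQ_AVAIL() yields 992.
 * When pidx == cidx the generation bit disambiguates an empty ring
 * (gen == 0, nothing in use) from a full one (gen == 1, ift_size in use).
 */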
SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD, &iflib_encap_load_mbuf_fail, 0, "# busdma load failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD, &iflib_encap_txq_avail_fail, 0, "# txq avail failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD, &iflib_encap_txd_encap_fail, 0, "# driver encap failures"); static int iflib_task_fn_rxs; static int iflib_rx_intr_enables; static int iflib_fast_intrs; static int iflib_intr_link; static int iflib_intr_msix; static int iflib_rx_unavail; static int iflib_rx_ctx_inactive; static int iflib_rx_zero_len; static int iflib_rx_if_input; static int iflib_rx_mbuf_null; static int iflib_rxd_flush; static int iflib_verbose_debug; SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD, &iflib_intr_link, 0, "# intr link calls"); SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD, &iflib_intr_msix, 0, "# intr msix calls"); SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0, "# task_fn_rx calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD, &iflib_rx_intr_enables, 0, "# rx intr enables"); SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0, "# fast_intr calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0, "# times rxeof called with no available data"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD, &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD, &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input, 0, "# times rxeof called if_input"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD, &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf"); SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0, "# times rxd_flush called"); SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW, &iflib_verbose_debug, 0, "enable verbose debugging"); #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1) static void iflib_debug_reset(void) { iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs = iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees = iflib_txq_drain_flushing = iflib_txq_drain_oactive = iflib_txq_drain_notready = iflib_txq_drain_encapfail = iflib_encap_load_mbuf_fail = iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail = iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs = iflib_intr_link = iflib_intr_msix = iflib_rx_unavail = iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input = iflib_rx_mbuf_null = iflib_rxd_flush = 0; } #else #define DBG_COUNTER_INC(name) static void iflib_debug_reset(void) {} #endif #define IFLIB_DEBUG 0 static void iflib_tx_structures_free(if_ctx_t ctx); static void iflib_rx_structures_free(if_ctx_t ctx); static int iflib_queues_alloc(if_ctx_t ctx); static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget); static int iflib_qset_structures_setup(if_ctx_t ctx); static int iflib_msix_init(if_ctx_t ctx); static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str); static void iflib_txq_check_drain(iflib_txq_t txq, int budget); static uint32_t iflib_txq_can_drain(struct ifmp_ring *); static int iflib_register(if_ctx_t); static void iflib_init_locked(if_ctx_t ctx); 
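/*
 * The counters above are compiled in only when IFLIB_DEBUG_COUNTERS is
 * non-zero (the default under INVARIANTS) and are exported read-only
 * under the net.iflib sysctl tree, so they can be inspected with e.g.
 * "sysctl net.iflib.tx_seen net.iflib.rx_allocs net.iflib.fl_refills";
 * iflib_debug_reset() zeroes the whole set at once.
 */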
static void iflib_add_device_sysctl_pre(if_ctx_t ctx); static void iflib_add_device_sysctl_post(if_ctx_t ctx); static void iflib_ifmp_purge(iflib_txq_t txq); static void _iflib_pre_assert(if_softc_ctx_t scctx); static void iflib_stop(if_ctx_t ctx); static void iflib_if_init_locked(if_ctx_t ctx); #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m); #endif #ifdef DEV_NETMAP #include #include #include MODULE_DEPEND(iflib, netmap, 1, 1, 1); static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init); /* * device-specific sysctl variables: * * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. * During regular operations the CRC is stripped, but on some * hardware reception of frames not multiple of 64 is slower, * so using crcstrip=0 helps in benchmarks. * * iflib_rx_miss, iflib_rx_miss_bufs: * count packets that might be missed due to lost interrupts. */ SYSCTL_DECL(_dev_netmap); /* * The xl driver by default strips CRCs and we do not override it. */ int iflib_crcstrip = 1; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip, CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames"); int iflib_rx_miss, iflib_rx_miss_bufs; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss, CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr"); SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs, CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs"); /* * Register/unregister. We are already under netmap lock. * Only called on the first register or the last unregister. */ static int iflib_netmap_register(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; int status; CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (!CTX_IS_VF(ctx)) IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* enable or disable flags and callbacks in na and ifp */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } iflib_stop(ctx); iflib_init_locked(ctx); IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 
0 : 1; if (status) nm_clear_native_flags(na); CTX_UNLOCK(ctx); return (status); } -static void -iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid) -{ - iflib_fl_t fl; - - fl = &rxq->ifr_fl[flid]; - iru->iru_paddrs = fl->ifl_bus_addrs; - iru->iru_vaddrs = &fl->ifl_vm_addrs[0]; - iru->iru_idxs = fl->ifl_rxd_idxs; - iru->iru_qsidx = rxq->ifr_id; - iru->iru_buf_size = fl->ifl_buf_size; - iru->iru_flidx = fl->ifl_id; -} - static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init) { struct netmap_adapter *na = kring->na; u_int const lim = kring->nkr_num_slots - 1; u_int head = kring->rhead; struct netmap_ring *ring = kring->ring; bus_dmamap_t *map; struct if_rxd_update iru; if_ctx_t ctx = rxq->ifr_ctx; iflib_fl_t fl = &rxq->ifr_fl[0]; uint32_t refill_pidx, nic_i; if (nm_i == head && __predict_true(!init)) return 0; iru_init(&iru, rxq, 0 /* flid */); map = fl->ifl_sds.ifsd_map; refill_pidx = netmap_idx_k2n(kring, nm_i); /* * IMPORTANT: we must leave one free slot in the ring, * so move head back by one unit */ head = nm_prev(head, lim); while (nm_i != head) { for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) { struct netmap_slot *slot = &ring->slot[nm_i]; void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]); uint32_t nic_i_dma = refill_pidx; nic_i = netmap_idx_k2n(kring, nm_i); MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH); if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ return netmap_ring_reinit(kring); fl->ifl_vm_addrs[tmp_pidx] = addr; if (__predict_false(init) && map) { netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr); } else if (map && (slot->flags & NS_BUF_CHANGED)) { /* buffer has changed, reload map */ netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr); } slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim); if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1) continue; iru.iru_pidx = refill_pidx; iru.iru_count = tmp_pidx+1; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); refill_pidx = nic_i; if (map == NULL) continue; for (int n = 0; n < iru.iru_count; n++) { bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma], BUS_DMASYNC_PREREAD); /* XXX - change this to not use the netmap func*/ nic_i_dma = nm_next(nic_i_dma, lim); } } } kring->nr_hwcur = head; if (map) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i); return (0); } /* * Reconcile kernel and user view of the transmit ring. * * All information is in the kring. * Userspace wants to send packets up to the one before kring->rhead, * kernel knows kring->nr_hwcur is the first unsent packet. * * Here we push packets out (as many as possible), and possibly * reclaim buffers from previously completed transmission. * * The caller (netmap) guarantees that there is only one instance * running at any time. Any interference with other driver * methods should be handled by the individual drivers. 
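 * In iflib the per-packet work is delegated to the hardware driver via the
 * struct if_txrx callbacks: isc_txd_encap() fills the NIC slot and
 * isc_txd_flush() kicks the TX unit once a batch has been staged.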
*/ static int iflib_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap ring */ u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; struct if_pkt_info pi; /* * interrupts on every tx packet are expensive so request * them every half ring, or where NS_REPORT is set */ u_int report_frequency = kring->nkr_num_slots >> 1; /* device-specific */ if_ctx_t ctx = ifp->if_softc; iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id]; if (txq->ift_sds.ifsd_map) bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part: process new packets to send. * nm_i is the current index in the netmap ring, * nic_i is the corresponding index in the NIC ring. * * If we have packets to send (nm_i != head) * iterate over the netmap ring, fetch length and update * the corresponding slot in the NIC ring. Some drivers also * need to update the buffer's physical address in the NIC slot * even if NS_BUF_CHANGED is not set (PNMB computes the addresses). * * The netmap_reload_map() call is especially expensive, * even when (as in this case) the tag is 0, so do it only * when the buffer has actually changed. * * If possible do not set the report/intr bit on all slots, * but only a few times per ring or when NS_REPORT is set. * * Finally, on 10G and faster drivers, it might be useful * to prefetch the next slot and txr entry. */ nm_i = netmap_idx_n2k(kring, kring->nr_hwcur); pkt_info_zero(&pi); pi.ipi_segs = txq->ift_segs; pi.ipi_qsidx = kring->ring_id; if (nm_i != head) { /* we have new packets to send */ nic_i = netmap_idx_k2n(kring, nm_i); __builtin_prefetch(&ring->slot[nm_i]); __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]); if (txq->ift_sds.ifsd_map) __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]); for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int flags = (slot->flags & NS_REPORT || nic_i == 0 || nic_i == report_frequency) ? IPI_TX_INTR : 0; /* device-specific */ pi.ipi_len = len; pi.ipi_segs[0].ds_addr = paddr; pi.ipi_segs[0].ds_len = len; pi.ipi_nsegs = 1; pi.ipi_ndescs = 0; pi.ipi_pidx = nic_i; pi.ipi_flags = flags; /* Fill the slot in the NIC ring. */ ctx->isc_txd_encap(ctx->ifc_softc, &pi); /* prefetch for next round */ __builtin_prefetch(&ring->slot[nm_i + 1]); __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]); if (txq->ift_sds.ifsd_map) { __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]); NM_CHECK_ADDR_LEN(na, addr, len); if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr); } /* make sure changes to the buffer are synced */ bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i], BUS_DMASYNC_PREWRITE); } slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } kring->nr_hwcur = head; /* synchronize the NIC ring */ if (txq->ift_sds.ifsd_map) bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* (re)start the tx unit up to slot nic_i (excluded) */ ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i); } /* * Second part: reclaim buffers for completed transmissions.
*/ if (iflib_tx_credits_update(ctx, txq)) { /* some tx completed, increment avail */ nic_i = txq->ift_cidx_processed; kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); } return (0); } /* * Reconcile kernel and user view of the receive ring. * Same as for the txsync, this routine must be efficient. * The caller guarantees a single invocation, but races against * the rest of the driver should be handled here. * * On call, kring->rhead is the first packet that userspace wants * to keep, and kring->rcur is the wakeup point. * The kernel has previously reported packets up to kring->rtail. * * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective * of whether or not we received an interrupt. */ static int iflib_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; uint32_t nm_i; /* index into the netmap ring */ uint32_t nic_i; /* index into the NIC ring */ u_int i, n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = netmap_idx_n2k(kring, kring->rhead); int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; struct if_rxd_info ri; struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id]; iflib_fl_t fl = rxq->ifr_fl; if (head > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) { if (fl->ifl_sds.ifsd_map == NULL) continue; bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); } /* * First part: import newly received packets. * * nm_i is the index of the next free slot in the netmap ring, * nic_i is the index of the next received packet in the NIC ring, * and they may differ in case if_init() has been called while * in netmap mode. For the receive ring we have * * nic_i = rxr->next_check; * nm_i = kring->nr_hwtail (previous) * and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size * * rxr->next_check is set to 0 on a ring reinit */ if (netmap_no_pendintr || force_update) { int crclen = iflib_crcstrip ? 0 : 4; int error, avail; uint16_t slot_flags = kring->nkr_slot_flags; for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; nic_i = fl->ifl_cidx; nm_i = netmap_idx_n2k(kring, nic_i); avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX); for (n = 0; avail > 0; n++, avail--) { rxd_info_zero(&ri); ri.iri_frags = rxq->ifr_frags; ri.iri_qsidx = kring->ring_id; ri.iri_ifp = ctx->ifc_ifp; ri.iri_cidx = nic_i; error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen; ring->slot[nm_i].flags = slot_flags; if (fl->ifl_sds.ifsd_map) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } if (n) { /* update the state variables */ if (netmap_no_pendintr && !force_update) { /* diagnostics */ iflib_rx_miss++; iflib_rx_miss_bufs += n; } fl->ifl_cidx = nic_i; kring->nr_hwtail = netmap_idx_k2n(kring, nm_i); } kring->nr_kflags &= ~NKR_PENDINTR; } } /* * Second part: skip past packets that userspace has released. * (kring->nr_hwcur to head excluded), * and make the buffers available for reception.
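 * (The refill itself is delegated to netmap_fl_refill(), which posts
 * buffers to the NIC in batches of up to IFLIB_MAX_RX_REFRESH through the
 * driver's isc_rxd_refill callback and then advances the hardware's view
 * with isc_rxd_flush.)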
* As usual nm_i is the index in the netmap ring, * nic_i is the index in the NIC ring, and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size */ /* XXX not sure how this will work with multiple free lists */ nm_i = netmap_idx_n2k(kring, kring->nr_hwcur); return (netmap_fl_refill(rxq, kring, nm_i, false)); } static void iflib_netmap_intr(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; CTX_LOCK(ctx); if (onoff) { IFDI_INTR_ENABLE(ctx); } else { IFDI_INTR_DISABLE(ctx); } CTX_UNLOCK(ctx); } static int iflib_netmap_attach(if_ctx_t ctx) { struct netmap_adapter na; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; bzero(&na, sizeof(na)); na.ifp = ctx->ifc_ifp; na.na_flags = NAF_BDG_MAYSLEEP; MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); na.num_tx_desc = scctx->isc_ntxd[0]; na.num_rx_desc = scctx->isc_nrxd[0]; na.nm_txsync = iflib_netmap_txsync; na.nm_rxsync = iflib_netmap_rxsync; na.nm_register = iflib_netmap_register; na.nm_intr = iflib_netmap_intr; na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; return (netmap_attach(&na)); } static void iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_slot *slot; slot = netmap_reset(na, NR_TX, txq->ift_id, 0); if (slot == NULL) return; if (txq->ift_sds.ifsd_map == NULL) return; for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { /* * In netmap mode, set the map for the packet buffer. * NOTE: Some drivers (not this one) also need to set * the physical buffer address in the NIC ring. * netmap_idx_n2k() maps a nic index, i, into the corresponding * netmap slot index, si */ int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i); netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); } } static void iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id]; struct netmap_slot *slot; uint32_t nm_i; slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); if (slot == NULL) return; nm_i = netmap_idx_n2k(kring, 0); netmap_fl_refill(rxq, kring, nm_i, true); } #define iflib_netmap_detach(ifp) netmap_detach(ifp) #else #define iflib_netmap_txq_init(ctx, txq) #define iflib_netmap_rxq_init(ctx, rxq) #define iflib_netmap_detach(ifp) #define iflib_netmap_attach(ctx) (0) #define netmap_rx_irq(ifp, qid, budget) (0) #define netmap_tx_irq(ifp, qid) do {} while (0) #endif #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } static __inline void prefetch2cachelines(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); #if (CACHE_LINE_SIZE < 128) __asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long))))); #endif } #else #define prefetch(x) #define prefetch2cachelines(x) #endif + +static void +iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid) +{ + iflib_fl_t fl; + + fl = &rxq->ifr_fl[flid]; + iru->iru_paddrs = fl->ifl_bus_addrs; + iru->iru_vaddrs = &fl->ifl_vm_addrs[0]; + iru->iru_idxs = fl->ifl_rxd_idxs; + iru->iru_qsidx = rxq->ifr_id; + iru->iru_buf_size = fl->ifl_buf_size; + iru->iru_flidx = fl->ifl_id; +} static void _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { if (err) return; *(bus_addr_t *) arg = segs[0].ds_addr; } int iflib_dma_alloc(if_ctx_t ctx, int size, 
iflib_dma_info_t dma, int mapflags) { int err; if_shared_ctx_t sctx = ctx->ifc_sctx; device_t dev = ctx->ifc_dev; KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ sctx->isc_q_align, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->idi_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed: %d\n", __func__, err); goto fail_0; } err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map); if (err) { device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, err); goto fail_1; } dma->idi_paddr = IF_BAD_DMA; err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr, size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT); if (err || dma->idi_paddr == IF_BAD_DMA) { device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err); goto fail_2; } dma->idi_size = size; return (0); fail_2: bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); fail_1: bus_dma_tag_destroy(dma->idi_tag); fail_0: dma->idi_tag = NULL; return (err); } int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count) { int i, err; iflib_dma_info_t *dmaiter; dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) { if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0) break; } if (err) iflib_dma_free_multi(dmalist, i); return (err); } void iflib_dma_free(iflib_dma_info_t dma) { if (dma->idi_tag == NULL) return; if (dma->idi_paddr != IF_BAD_DMA) { bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->idi_tag, dma->idi_map); dma->idi_paddr = IF_BAD_DMA; } if (dma->idi_vaddr != NULL) { bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); dma->idi_vaddr = NULL; } bus_dma_tag_destroy(dma->idi_tag); dma->idi_tag = NULL; } void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count) { int i; iflib_dma_info_t *dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) iflib_dma_free(*dmaiter); } #ifdef EARLY_AP_STARTUP static const int iflib_started = 1; #else /* * We used to abuse the smp_started flag to decide if the queues have been * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()). * That gave bad races, since the SYSINIT() runs strictly after smp_started * is set. Run a SYSINIT() strictly after that to just set a usable * completion flag. 
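 * The interrupt filters below simply claim any interrupt that arrives
 * before iflib_started is set, so a stray early interrupt is harmless.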
*/ static int iflib_started; static void iflib_record_started(void *arg) { iflib_started = 1; } SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST, iflib_record_started, NULL); #endif static int iflib_fast_intr(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int iflib_fast_intr_rxtx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx; if_ctx_t ctx; int i, cidx; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); for (i = 0; i < rxq->ifr_ntxqirq; i++) { qidx_t txqid = rxq->ifr_txqid[i]; ctx = rxq->ifr_ctx; if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) { IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid); continue; } GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); } if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ) cidx = rxq->ifr_cq_cidx; else cidx = rxq->ifr_fl[0].ifl_cidx; if (iflib_rxd_avail(ctx, rxq, cidx, 1)) GROUPTASK_ENQUEUE(gtask); else IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); return (FILTER_HANDLED); } static int iflib_fast_intr_ctx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, driver_intr_t handler, void *arg, char *name) { int rc, flags; struct resource *res; void *tag = NULL; device_t dev = ctx->ifc_dev; flags = RF_ACTIVE; if (ctx->ifc_flags & IFC_LEGACY) flags |= RF_SHAREABLE; MPASS(rid < 512); irq->ii_rid = rid; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags); if (res == NULL) { device_printf(dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } irq->ii_res = res; KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL")); rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET, filter, handler, arg, &tag); if (rc != 0) { device_printf(dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name ? name : "unknown", rc); return (rc); } else if (name) bus_describe_intr(dev, res, tag, "%s", name); irq->ii_tag = tag; return (0); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset. 
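 * Two DMA tags are created per TX queue, one sized for ordinary frames
 * and one for TSO payloads; the per-descriptor DMA maps are only
 * allocated on the DMAR/IOMMU path guarded below.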
* **********************************************************************/ static int iflib_txsd_alloc(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int err, nsegments, ntsosegments; nsegments = scctx->isc_tx_nsegments; ntsosegments = scctx->isc_tx_tso_segments_max; MPASS(scctx->isc_ntxd[0] > 0); MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); MPASS(nsegments > 0); MPASS(ntsosegments > 0); /* * Setup DMA descriptor areas. */ if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_tx_maxsize, /* maxsize */ nsegments, /* nsegments */ sctx->isc_tx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_desc_tag))) { device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n", (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize); goto fail; } if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ scctx->isc_tx_tso_size_max, /* maxsize */ ntsosegments, /* nsegments */ scctx->isc_tx_tso_segsize_max, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_tso_desc_tag))) { device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err); goto fail; } if (!(txq->ift_sds.ifsd_flags = (uint8_t *) malloc(sizeof(uint8_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } if (!(txq->ift_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ #if defined(ACPI_DMAR) || (! 
(defined(__i386__) || defined(__amd64__))) if ((ctx->ifc_flags & IFC_DMAR) == 0) return (0); if (!(txq->ift_sds.ifsd_map = (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } #endif return (0); fail: /* We free all, it handles case where we are in the middle */ iflib_tx_structures_free(ctx); return (err); } static void iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i) { bus_dmamap_t map; map = NULL; if (txq->ift_sds.ifsd_map != NULL) map = txq->ift_sds.ifsd_map[i]; if (map != NULL) { bus_dmamap_unload(txq->ift_desc_tag, map); bus_dmamap_destroy(txq->ift_desc_tag, map); txq->ift_sds.ifsd_map[i] = NULL; } } static void iflib_txq_destroy(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; for (int i = 0; i < txq->ift_size; i++) iflib_txsd_destroy(ctx, txq, i); if (txq->ift_sds.ifsd_map != NULL) { free(txq->ift_sds.ifsd_map, M_IFLIB); txq->ift_sds.ifsd_map = NULL; } if (txq->ift_sds.ifsd_m != NULL) { free(txq->ift_sds.ifsd_m, M_IFLIB); txq->ift_sds.ifsd_m = NULL; } if (txq->ift_sds.ifsd_flags != NULL) { free(txq->ift_sds.ifsd_flags, M_IFLIB); txq->ift_sds.ifsd_flags = NULL; } if (txq->ift_desc_tag != NULL) { bus_dma_tag_destroy(txq->ift_desc_tag); txq->ift_desc_tag = NULL; } if (txq->ift_tso_desc_tag != NULL) { bus_dma_tag_destroy(txq->ift_tso_desc_tag); txq->ift_tso_desc_tag = NULL; } } static void iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i) { struct mbuf **mp; mp = &txq->ift_sds.ifsd_m[i]; if (*mp == NULL) return; if (txq->ift_sds.ifsd_map != NULL) { bus_dmamap_sync(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i]); } m_free(*mp); DBG_COUNTER_INC(tx_frees); *mp = NULL; } static int iflib_txq_setup(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; iflib_dma_info_t di; int i; /* Set number of descriptors available */ txq->ift_qstatus = IFLIB_QUEUE_IDLE; /* XXX make configurable */ txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ; /* Reset indices */ txq->ift_cidx_processed = 0; txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0; txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset]; for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) bzero((void *)di->idi_vaddr, di->idi_size); IFDI_TXQ_SETUP(ctx, txq->ift_id); for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
* **********************************************************************/ static int iflib_rxsd_alloc(iflib_rxq_t rxq) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; iflib_fl_t fl; int err; MPASS(scctx->isc_nrxd[0] > 0); MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); fl = rxq->ifr_fl; for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_rx_maxsize, /* maxsize */ sctx->isc_rx_nsegments, /* nsegments */ sctx->isc_rx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &fl->ifl_desc_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed %d\n", __func__, err); goto fail; } if (!(fl->ifl_sds.ifsd_flags = (uint8_t *) malloc(sizeof(uint8_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX buffer memory\n"); err = ENOMEM; goto fail; } if (!(fl->ifl_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX buffer memory\n"); err = ENOMEM; goto fail; } if (!(fl->ifl_sds.ifsd_cl = (caddr_t *) malloc(sizeof(caddr_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX buffer memory\n"); err = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__))) if ((ctx->ifc_flags & IFC_DMAR) == 0) continue; if (!(fl->ifl_sds.ifsd_map = (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX buffer map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create RX buffer DMA map\n"); goto fail; } } #endif } return (0); fail: iflib_rx_structures_free(ctx); return (err); } /* * Internal service routines */ struct rxq_refill_cb_arg { int error; bus_dma_segment_t seg; int nseg; }; static void _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct rxq_refill_cb_arg *cb_arg = arg; cb_arg->error = error; cb_arg->seg = segs[0]; cb_arg->nseg = nseg; } #ifdef ACPI_DMAR #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR) #else #define IS_DMAR(ctx) (0) #endif /** * _iflib_fl_refill - refill a free-buffer list * @ctx: the iflib context * @fl: the free list to refill * @count: the number of new buffers to allocate * * (Re)populate a free-buffer list with up to @count new packet buffers. * The caller must assure that @count does not exceed the list's capacity.
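 * Buffers are handed to the hardware in batches of up to
 * IFLIB_MAX_RX_REFRESH via the driver's isc_rxd_refill callback, and the
 * final pidx is published with a single isc_rxd_flush call.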
*/ static void _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) { struct mbuf *m; int idx, frag_idx = fl->ifl_fragidx; int pidx = fl->ifl_pidx; caddr_t cl, *sd_cl; struct mbuf **sd_m; uint8_t *sd_flags; struct if_rxd_update iru; bus_dmamap_t *sd_map; int n, i = 0; uint64_t bus_addr; int err; sd_m = fl->ifl_sds.ifsd_m; sd_map = fl->ifl_sds.ifsd_map; sd_cl = fl->ifl_sds.ifsd_cl; sd_flags = fl->ifl_sds.ifsd_flags; idx = pidx; n = count; MPASS(n > 0); MPASS(fl->ifl_credits + n <= fl->ifl_size); if (pidx < fl->ifl_cidx) MPASS(pidx + n <= fl->ifl_cidx); if (pidx == fl->ifl_cidx && (fl->ifl_credits < fl->ifl_size)) MPASS(fl->ifl_gen == 0); if (pidx > fl->ifl_cidx) MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); DBG_COUNTER_INC(fl_refills); if (n > 8) DBG_COUNTER_INC(fl_refills_large); iru_init(&iru, fl->ifl_rxq, fl->ifl_id); while (n--) { /* * We allocate an uninitialized mbuf + cluster, mbuf is * initialized after rx. * * If the cluster is still set then we know a minimum sized packet was received */ bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); if ((frag_idx < 0) || (frag_idx >= fl->ifl_size)) bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); if ((cl = sd_cl[frag_idx]) == NULL) { if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) break; #if MEMORY_LOGGING fl->ifl_cl_enqueued++; #endif } if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { break; } #if MEMORY_LOGGING fl->ifl_m_enqueued++; #endif DBG_COUNTER_INC(rx_allocs); #if defined(__i386__) || defined(__amd64__) if (!IS_DMAR(ctx)) { bus_addr = pmap_kextract((vm_offset_t)cl); } else #endif { struct rxq_refill_cb_arg cb_arg; iflib_rxq_t q; cb_arg.error = 0; q = fl->ifl_rxq; MPASS(sd_map != NULL); MPASS(sd_map[frag_idx] != NULL); err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx], cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0); bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx], BUS_DMASYNC_PREREAD); if (err != 0 || cb_arg.error) { /* * !zone_pack ? 
*/ if (fl->ifl_zone == zone_pack) uma_zfree(fl->ifl_zone, cl); m_free(m); n = 0; goto done; } bus_addr = cb_arg.seg.ds_addr; } bit_set(fl->ifl_rx_bitmap, frag_idx); sd_flags[frag_idx] |= RX_SW_DESC_INUSE; MPASS(sd_m[frag_idx] == NULL); sd_cl[frag_idx] = cl; sd_m[frag_idx] = m; fl->ifl_rxd_idxs[i] = frag_idx; fl->ifl_bus_addrs[i] = bus_addr; fl->ifl_vm_addrs[i] = cl; fl->ifl_credits++; i++; MPASS(fl->ifl_credits <= fl->ifl_size); if (++idx == fl->ifl_size) { fl->ifl_gen = 1; idx = 0; } if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); i = 0; pidx = idx; fl->ifl_pidx = idx; } } done: DBG_COUNTER_INC(rxd_flush); if (fl->ifl_pidx == 0) pidx = fl->ifl_size - 1; else pidx = fl->ifl_pidx - 1; if (sd_map) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); fl->ifl_fragidx = frag_idx; } static __inline void __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) { /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; #ifdef INVARIANTS int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; #endif MPASS(fl->ifl_credits <= fl->ifl_size); MPASS(reclaimable == delta); if (reclaimable > 0) _iflib_fl_refill(ctx, fl, min(max, reclaimable)); } static void iflib_fl_bufs_free(iflib_fl_t fl) { iflib_dma_info_t idi = fl->ifl_ifdi; uint32_t i; for (i = 0; i < fl->ifl_size; i++) { struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i]; caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; if (*sd_flags & RX_SW_DESC_INUSE) { if (fl->ifl_sds.ifsd_map != NULL) { bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i]; bus_dmamap_unload(fl->ifl_desc_tag, sd_map); bus_dmamap_destroy(fl->ifl_desc_tag, sd_map); } if (*sd_m != NULL) { m_init(*sd_m, M_NOWAIT, MT_DATA, 0); uma_zfree(zone_mbuf, *sd_m); } if (*sd_cl != NULL) uma_zfree(fl->ifl_zone, *sd_cl); *sd_flags = 0; } else { MPASS(*sd_cl == NULL); MPASS(*sd_m == NULL); } #if MEMORY_LOGGING fl->ifl_m_dequeued++; fl->ifl_cl_dequeued++; #endif *sd_cl = NULL; *sd_m = NULL; } #ifdef INVARIANTS for (i = 0; i < fl->ifl_size; i++) { MPASS(fl->ifl_sds.ifsd_flags[i] == 0); MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); MPASS(fl->ifl_sds.ifsd_m[i] == NULL); } #endif /* * Reset free list values */ fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; bzero(idi->idi_vaddr, idi->idi_size); } /********************************************************************* * * Initialize a receive ring and its buffers. 
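 * The cluster size is chosen from the max-frame-size ladder below
 * (MCLBYTES up through MJUM16BYTES where available), and the list is
 * primed with at most 128 buffers so that an idle ring does not pin
 * large amounts of memory.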
* **********************************************************************/ static int iflib_fl_setup(iflib_fl_t fl) { iflib_rxq_t rxq = fl->ifl_rxq; if_ctx_t ctx = rxq->ifr_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); /* ** Free current RX buffer structs and their mbufs */ iflib_fl_bufs_free(fl); /* Now replenish the mbufs */ MPASS(fl->ifl_credits == 0); /* * XXX don't set the max_frame_size to larger * than the hardware can handle */ if (sctx->isc_max_frame_size <= 2048) fl->ifl_buf_size = MCLBYTES; #ifndef CONTIGMALLOC_WORKS else fl->ifl_buf_size = MJUMPAGESIZE; #else else if (sctx->isc_max_frame_size <= 4096) fl->ifl_buf_size = MJUMPAGESIZE; else if (sctx->isc_max_frame_size <= 9216) fl->ifl_buf_size = MJUM9BYTES; else fl->ifl_buf_size = MJUM16BYTES; #endif if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; fl->ifl_cltype = m_gettype(fl->ifl_buf_size); fl->ifl_zone = m_getzone(fl->ifl_buf_size); /* * Avoid pre-allocating zillions of clusters to an idle card, * potentially speeding up attach. */ _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); MPASS(min(128, fl->ifl_size) == fl->ifl_credits); /* handle refill failure */ if (min(128, fl->ifl_size) != fl->ifl_credits) return (ENOBUFS); MPASS(rxq != NULL); MPASS(fl->ifl_ifdi != NULL); bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void iflib_rx_sds_free(iflib_rxq_t rxq) { iflib_fl_t fl; int i; if (rxq->ifr_fl != NULL) { for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; if (fl->ifl_desc_tag != NULL) { bus_dma_tag_destroy(fl->ifl_desc_tag); fl->ifl_desc_tag = NULL; } free(fl->ifl_sds.ifsd_m, M_IFLIB); free(fl->ifl_sds.ifsd_cl, M_IFLIB); /* XXX destroy maps first */ free(fl->ifl_sds.ifsd_map, M_IFLIB); fl->ifl_sds.ifsd_m = NULL; fl->ifl_sds.ifsd_cl = NULL; fl->ifl_sds.ifsd_map = NULL; } free(rxq->ifr_fl, M_IFLIB); rxq->ifr_fl = NULL; rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; } } /* * MI (machine independent) logic * */ static void iflib_timer(void *arg) { iflib_txq_t txq = arg; if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; /* ** Check on the state of the TX queue(s), this ** can be done without the lock because it's RO ** and the HUNG state will be static if set.
*/ IFDI_TIMER(ctx, txq->ift_id); if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && ((txq->ift_cleaned_prev == txq->ift_cleaned) || (sctx->isc_pause_frames == 0))) goto hung; if (ifmp_ring_is_stalled(txq->ift_br)) txq->ift_qstatus = IFLIB_QUEUE_HUNG; txq->ift_cleaned_prev = txq->ift_cleaned; /* handle any laggards */ if (txq->ift_db_pending) GROUPTASK_ENQUEUE(&txq->ift_task); sctx->isc_pause_frames = 0; if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); return; hung: CTX_LOCK(ctx); if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n", txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); IFDI_WATCHDOG_RESET(ctx); ctx->ifc_watchdog_events++; ctx->ifc_flags |= IFC_DO_RESET; iflib_admin_intr_deferred(ctx); CTX_UNLOCK(ctx); } static void iflib_init_locked(if_ctx_t ctx) { if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_t ifp = ctx->ifc_ifp; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); /* Set hardware offload abilities */ if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, tx_ip_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); iflib_netmap_txq_init(ctx, txq); } #ifdef INVARIANTS i = if_getdrvflags(ifp); #endif IFDI_INIT(ctx); MPASS(if_getdrvflags(ifp) == i); for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { /* XXX this should really be done on a per-queue basis */ if (if_getcapenable(ifp) & IFCAP_NETMAP) { MPASS(rxq->ifr_id == i); iflib_netmap_rxq_init(ctx, rxq); continue; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { if (iflib_fl_setup(fl)) { device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); goto done; } } } done: if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); IFDI_INTR_ENABLE(ctx); txq = ctx->ifc_txqs; for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); } static int iflib_media_change(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); int err; CTX_LOCK(ctx); if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) iflib_init_locked(ctx); CTX_UNLOCK(ctx); return (err); } static void iflib_media_status(if_t ifp, struct ifmediareq *ifmr) { if_ctx_t ctx = if_getsoftc(ifp); CTX_LOCK(ctx); IFDI_UPDATE_ADMIN_STATUS(ctx); IFDI_MEDIA_STATUS(ctx, ifmr); CTX_UNLOCK(ctx); } static void iflib_stop(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; iflib_dma_info_t di; iflib_fl_t fl; int i, j; /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); DELAY(1000); IFDI_STOP(ctx); DELAY(1000); iflib_debug_reset(); 
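/*
 * Editor's note (sketch, not from the original author): the teardown
 * below follows the usual quiesce-then-scrub order -- interrupts are
 * already masked and IFDI_STOP has told the hardware to stop DMA, so
 * the loops that follow can safely zero the software ring state and
 * the descriptor memory itself.  Once they finish, every ring is
 * expected to satisfy the empty-ring invariant assumed elsewhere in
 * this file, roughly:
 *
 *	pidx == cidx == 0 && gen == 0 && in_use == 0
 *
 * so that iflib_init_locked() can later rebuild the rings from
 * scratch without replaying any stale producer/consumer state.
 */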
/* Wait for current tx queue users to exit to disarm watchdog timer. */ for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { /* make sure all transmitters have completed before proceeding XXX */ /* clean any enqueued buffers */ iflib_ifmp_purge(txq); /* Free any existing tx buffers. */ for (j = 0; j < txq->ift_size; j++) { iflib_txsd_free(ctx, txq, j); } txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; txq->ift_pullups = 0; ifmp_ring_reset_stats(txq->ift_br); for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); } for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { /* make sure all receivers have completed before proceeding XXX */ for (j = 0, di = rxq->ifr_ifdi; j < ctx->ifc_nhwrxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); /* also resets the free lists pidx/cidx */ for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) iflib_fl_bufs_free(fl); } } static inline caddr_t calc_next_rxd(iflib_fl_t fl, int cidx) { qidx_t size; int nrxd; caddr_t start, end, cur, next; nrxd = fl->ifl_size; size = fl->ifl_rxd_size; start = fl->ifl_ifdi->idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*nrxd; next = CACHE_PTR_NEXT(cur); return (next < end ? next : start); } static inline void prefetch_pkts(iflib_fl_t fl, int cidx) { int nextptr; int nrxd = fl->ifl_size; caddr_t next_rxd; nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); prefetch(&fl->ifl_sds.ifsd_m[nextptr]); prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); next_rxd = calc_next_rxd(fl, cidx); prefetch(next_rxd); prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); } static void rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) { int flid, cidx; bus_dmamap_t map; iflib_fl_t fl; iflib_dma_info_t di; int next; map = NULL; flid = irf->irf_flid; cidx = irf->irf_idx; fl = &rxq->ifr_fl[flid]; sd->ifsd_fl = fl; sd->ifsd_cidx = cidx; sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; fl->ifl_credits--; #if MEMORY_LOGGING fl->ifl_m_dequeued++; #endif if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) prefetch_pkts(fl, cidx); if (fl->ifl_sds.ifsd_map != NULL) { next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); prefetch(&fl->ifl_sds.ifsd_map[next]); map = fl->ifl_sds.ifsd_map[cidx]; di = fl->ifl_ifdi; next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); prefetch(&fl->ifl_sds.ifsd_flags[next]); bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* not valid assert if bxe really does SGE from non-contiguous elements */ MPASS(fl->ifl_cidx == cidx); if (unload) bus_dmamap_unload(fl->ifl_desc_tag, map); } fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); if (__predict_false(fl->ifl_cidx == 0)) fl->ifl_gen = 0; if (map != NULL) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD |
BUS_DMASYNC_PREWRITE); bit_clear(fl->ifl_rx_bitmap, cidx); } static struct mbuf * assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) { int i, padlen , flags; struct mbuf *m, *mh, *mt; caddr_t cl; i = 0; mh = NULL; do { rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); MPASS(*sd->ifsd_cl != NULL); MPASS(*sd->ifsd_m != NULL); /* Don't include zero-length frags */ if (ri->iri_frags[i].irf_len == 0) { /* XXX we can save the cluster here, but not the mbuf */ m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); m_free(*sd->ifsd_m); *sd->ifsd_m = NULL; continue; } m = *sd->ifsd_m; *sd->ifsd_m = NULL; if (mh == NULL) { flags = M_PKTHDR|M_EXT; mh = mt = m; padlen = ri->iri_pad; } else { flags = M_EXT; mt->m_next = m; mt = m; /* assuming padding is only on the first fragment */ padlen = 0; } cl = *sd->ifsd_cl; *sd->ifsd_cl = NULL; /* Can these two be made one ? */ m_init(m, M_NOWAIT, MT_DATA, flags); m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); /* * These must follow m_init and m_cljset */ m->m_data += padlen; ri->iri_len -= padlen; m->m_len = ri->iri_frags[i].irf_len; } while (++i < ri->iri_nfrags); return (mh); } /* * Process one software descriptor */ static struct mbuf * iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) { struct if_rxsd sd; struct mbuf *m; /* should I merge this back in now that the two paths are basically duplicated? */ if (ri->iri_nfrags == 1 && ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) { rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); m = *sd.ifsd_m; *sd.ifsd_m = NULL; m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m)) m->m_data += 2; #endif memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); m->m_len = ri->iri_frags[0].irf_len; } else { m = assemble_segments(rxq, ri, &sd); } m->m_pkthdr.len = ri->iri_len; m->m_pkthdr.rcvif = ri->iri_ifp; m->m_flags |= ri->iri_flags; m->m_pkthdr.ether_vtag = ri->iri_vtag; m->m_pkthdr.flowid = ri->iri_flowid; M_HASHTYPE_SET(m, ri->iri_rsstype); m->m_pkthdr.csum_flags = ri->iri_csum_flags; m->m_pkthdr.csum_data = ri->iri_csum_data; return (m); } static bool iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int avail, i; qidx_t *cidxp; struct if_rxd_info ri; int err, budget_left, rx_bytes, rx_pkts; iflib_fl_t fl; struct ifnet *ifp; int lro_enabled; /* * XXX early demux data packets so that if_input processing only handles * acks in interrupt context */ struct mbuf *m, *mh, *mt, *mf; ifp = ctx->ifc_ifp; mh = mt = NULL; MPASS(budget > 0); rx_pkts = rx_bytes = 0; if (sctx->isc_flags & IFLIB_HAS_RXCQ) cidxp = &rxq->ifr_cq_cidx; else cidxp = &rxq->ifr_fl[0].ifl_cidx; if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); DBG_COUNTER_INC(rx_unavail); return (false); } for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) { if (__predict_false(!CTX_ACTIVE(ctx))) { DBG_COUNTER_INC(rx_ctx_inactive); break; } /* * Reset client set fields to their default values */ rxd_info_zero(&ri); ri.iri_qsidx = rxq->ifr_id; ri.iri_cidx = *cidxp; ri.iri_ifp = ifp; ri.iri_frags = rxq->ifr_frags; err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); if (err) goto err; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { *cidxp = ri.iri_cidx; /* Update our consumer index */ /* XXX NB: shurd - check if this is still safe */ while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { rxq->ifr_cq_cidx -= 
scctx->isc_nrxd[0]; rxq->ifr_cq_gen = 0; } /* was this only a completion queue message? */ if (__predict_false(ri.iri_nfrags == 0)) continue; } MPASS(ri.iri_nfrags != 0); MPASS(ri.iri_len != 0); /* will advance the cidx on the corresponding free lists */ m = iflib_rxd_pkt_get(rxq, &ri); if (avail == 0 && budget_left) avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); if (__predict_false(m == NULL)) { DBG_COUNTER_INC(rx_mbuf_null); continue; } /* imm_pkt: -- cxgb */ if (mh == NULL) mh = mt = m; else { mt->m_nextpkt = m; mt = m; } } /* make sure that we can refill faster than drain */ for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); mt = mf = NULL; while (mh != NULL) { m = mh; if (mf == NULL) mf = m; mh = mh->m_nextpkt; m->m_nextpkt = NULL; #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) continue; #endif rx_bytes += m->m_pkthdr.len; rx_pkts++; #if defined(INET6) || defined(INET) if (lro_enabled && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) { if (mf == m) mf = NULL; continue; } #endif if (mt != NULL) mt->m_nextpkt = m; mt = m; } if (mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); } if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); /* * Flush any outstanding LRO work */ #if defined(INET6) || defined(INET) tcp_lro_flush_all(&rxq->ifr_lc); #endif if (avail) return true; return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); err: CTX_LOCK(ctx); ctx->ifc_flags |= IFC_DO_RESET; iflib_admin_intr_deferred(ctx); CTX_UNLOCK(ctx); return (false); } #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) static inline qidx_t txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (in_use > 4*minthresh) return (notify_count); if (in_use > 2*minthresh) return (notify_count >> 1); if (in_use > minthresh) return (notify_count >> 3); return (0); } static inline qidx_t txq_max_rs_deferred(iflib_txq_t txq) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (txq->ift_in_use > 4*minthresh) return (notify_count); if (txq->ift_in_use > 2*minthresh) return (notify_count >> 1); if (txq->ift_in_use > minthresh) return (notify_count >> 2); return (2); } #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) /* forward compatibility for cxgb */ #define FIRST_QSET(ctx) 0 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) /* XXX we should be setting this to something other than zero */ #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) static inline bool iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) { qidx_t dbval, max; bool rang; rang = false; max = TXQ_MAX_DB_DEFERRED(txq, in_use); if (ring || txq->ift_db_pending >= max) { dbval = 
txq->ift_npending ? txq->ift_npending : txq->ift_pidx; ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); txq->ift_db_pending = txq->ift_npending = 0; rang = true; } return (rang); } #ifdef PKT_DEBUG static void print_pkt(if_pkt_info_t pi) { printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); } #endif #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) static int iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) { if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; struct ether_vlan_header *eh; struct mbuf *m, *n; n = m = *mp; if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && M_WRITABLE(m) == 0) { if ((m = m_dup(m, M_NOWAIT)) == NULL) { return (ENOMEM); } else { m_freem(*mp); n = *mp = m; } } /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. */ if (__predict_false(m->m_len < sizeof(*eh))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) return (ENOMEM); } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { pi->ipi_etype = ntohs(eh->evl_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { pi->ipi_etype = ntohs(eh->evl_encap_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN; } if (if_getmtu(txq->ift_ctx->ifc_ifp) >= pi->ipi_len) { pi->ipi_csum_flags &= ~(CSUM_IP_TSO|CSUM_IP6_TSO); } switch (pi->ipi_etype) { #ifdef INET case ETHERTYPE_IP: { struct ip *ip = NULL; struct tcphdr *th = NULL; int minthlen; minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); if (__predict_false(m->m_len < minthlen)) { /* * if this code bloat is causing too much of a hit * move it to a separate function and mark it noinline */ if (m->m_len == pi->ipi_ehdrlen) { n = m->m_next; MPASS(n); if (n->m_len >= sizeof(*ip)) { ip = (struct ip *)n->m_data; if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); } } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } } else { ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } pi->ipi_ip_hlen = ip->ip_hl << 2; pi->ipi_ipproto = ip->ip_p; pi->ipi_flags |= IPI_TX_IPV4; if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) ip->ip_sum = 0; if (IS_TSO4(pi)) { if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(th == NULL)) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) return (ENOMEM); th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if 
(__predict_false(ip->ip_p != IPPROTO_TCP)) return (ENXIO); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { ip->ip_sum = 0; ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); } } break; } #endif #ifdef INET6 case ETHERTYPE_IPV6: { struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); struct tcphdr *th; pi->ipi_ip_hlen = sizeof(struct ip6_hdr); if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) return (ENOMEM); /* m_pullup() may have moved the data; recompute ip6 */ ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); } th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); /* XXX-BZ this will go badly in case of ext hdrs. */ pi->ipi_ipproto = ip6->ip6_nxt; pi->ipi_flags |= IPI_TX_IPV6; if (IS_TSO6(pi)) { if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) return (ENOMEM); /* recompute stale header pointers after m_pullup() */ ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; } if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) return (ENXIO); /* * The corresponding flag is set by the stack in the IPv4 * TSO case, but not in IPv6 (at least in FreeBSD 10.2). * So, set it here because the rest of the flow requires it. */ pi->ipi_csum_flags |= CSUM_TCP_IPV6; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; } break; } #endif default: pi->ipi_csum_flags &= ~CSUM_OFFLOAD; pi->ipi_ip_hlen = 0; break; } *mp = m; return (0); } static __noinline struct mbuf * collapse_pkthdr(struct mbuf *m0) { struct mbuf *m, *m_next, *tmp; m = m0; m_next = m->m_next; /* free leading zero-length mbufs, saving each successor before m_free() */ while (m_next != NULL && m_next->m_len == 0) { tmp = m_next->m_next; m_next->m_next = NULL; m_free(m_next); m_next = tmp; } m->m_next = m_next; /* the chain was entirely zero-length after the head */ if (__predict_false(m_next == NULL)) return (m); if ((m_next->m_flags & M_EXT) == 0) { m = m_defrag(m, M_NOWAIT); } else { tmp = m_next->m_next; memcpy(m_next, m, MPKTHSIZE); m = m_next; m->m_next = tmp; } return (m); } /* * If dodgy hardware rejects the scatter gather chain we've handed it * we'll need to remove the mbuf chain from ifsg_m[] before we can add the * m_defrag'd mbufs */ static __noinline struct mbuf * iflib_remove_mbuf(iflib_txq_t txq) { int ntxd, i, pidx; struct mbuf *m, *mh, **ifsd_m; pidx = txq->ift_pidx; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; mh = m = ifsd_m[pidx]; ifsd_m[pidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif i = 1; while (m) { ifsd_m[(pidx + i) & (ntxd -1)] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif m = m->m_next; i++; } return (mh); } static int iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs, int max_segs, int flags) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; int i, next, pidx, err, ntxd, count; struct mbuf *m, *tmp, **ifsd_m; m = *m0; /* * Please don't ever do this */ if (__predict_false(m->m_len == 0)) *m0 = m = collapse_pkthdr(m); ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; pidx = txq->ift_pidx; if (map != NULL) { uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags; err = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nsegs, BUS_DMA_NOWAIT); if (err) return (err); ifsd_flags[pidx] |= TX_SW_DESC_MAPPED; count = 0; m = *m0; do { if
(__predict_false(m->m_len <= 0)) { tmp = m; m = m->m_next; tmp->m_next = NULL; m_free(tmp); continue; } m = m->m_next; count++; } while (m != NULL); if (count > *nsegs) { ifsd_m[pidx] = *m0; ifsd_m[pidx]->m_flags |= M_TOOBIG; return (0); } m = *m0; count = 0; do { next = (pidx + count) & (ntxd-1); MPASS(ifsd_m[next] == NULL); ifsd_m[next] = m; count++; tmp = m; m = m->m_next; } while (m != NULL); } else { int buflen, sgsize, maxsegsz, max_sgsize; vm_offset_t vaddr; vm_paddr_t curaddr; count = i = 0; m = *m0; if (m->m_pkthdr.csum_flags & CSUM_TSO) maxsegsz = scctx->isc_tx_tso_segsize_max; else maxsegsz = sctx->isc_tx_maxsegsize; do { if (__predict_false(m->m_len <= 0)) { tmp = m; m = m->m_next; tmp->m_next = NULL; m_free(tmp); continue; } buflen = m->m_len; vaddr = (vm_offset_t)m->m_data; /* * see if we can't be smarter about physically * contiguous mappings */ next = (pidx + count) & (ntxd-1); MPASS(ifsd_m[next] == NULL); #if MEMORY_LOGGING txq->ift_enqueued++; #endif ifsd_m[next] = m; while (buflen > 0) { if (i >= max_segs) goto err; max_sgsize = MIN(buflen, maxsegsz); curaddr = pmap_kextract(vaddr); sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); sgsize = MIN(sgsize, max_sgsize); segs[i].ds_addr = curaddr; segs[i].ds_len = sgsize; vaddr += sgsize; buflen -= sgsize; i++; } count++; tmp = m; m = m->m_next; } while (m != NULL); *nsegs = i; } return (0); err: *m0 = iflib_remove_mbuf(txq); return (EFBIG); } static inline caddr_t calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) { qidx_t size; int ntxd; caddr_t start, end, cur, next; ntxd = txq->ift_size; size = txq->ift_txd_size[qid]; start = txq->ift_ifdi[qid].idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*ntxd; next = CACHE_PTR_NEXT(cur); return (next < end ? next : start); } static int iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; bus_dma_segment_t *segs; struct mbuf *m_head; void *next_txd; bus_dmamap_t map; struct if_pkt_info pi; int remap = 0; int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; bus_dma_tag_t desc_tag; segs = txq->ift_segs; ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; segs = txq->ift_segs; ntxd = txq->ift_size; m_head = *m_headp; map = NULL; /* * If we're doing TSO the next descriptor to clean may be quite far ahead */ cidx = txq->ift_cidx; pidx = txq->ift_pidx; if (ctx->ifc_flags & IFC_PREFETCH) { next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { next_txd = calc_next_txd(txq, cidx, 0); prefetch(next_txd); } /* prefetch the next cache line of mbuf pointers and flags */ prefetch(&txq->ift_sds.ifsd_m[next]); if (txq->ift_sds.ifsd_map != NULL) { prefetch(&txq->ift_sds.ifsd_map[next]); next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); prefetch(&txq->ift_sds.ifsd_flags[next]); } } else if (txq->ift_sds.ifsd_map != NULL) map = txq->ift_sds.ifsd_map[pidx]; if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { desc_tag = txq->ift_tso_desc_tag; max_segs = scctx->isc_tx_tso_segments_max; } else { desc_tag = txq->ift_desc_tag; max_segs = scctx->isc_tx_nsegments; } m_head = *m_headp; pkt_info_zero(&pi); pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); pi.ipi_pidx = pidx; pi.ipi_qsidx = txq->ift_id; pi.ipi_len = m_head->m_pkthdr.len; pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? 
m_head->m_pkthdr.ether_vtag : 0; /* deliberate bitwise OR to make one condition */ if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) return (err); m_head = *m_headp; } retry: err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT); defrag: if (__predict_false(err)) { switch (err) { case EFBIG: /* try collapse once and defrag once */ if (remap == 0) m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); if (remap == 1) m_head = m_defrag(*m_headp, M_NOWAIT); remap++; if (__predict_false(m_head == NULL)) goto defrag_failed; txq->ift_mbuf_defrag++; *m_headp = m_head; goto retry; break; case ENOMEM: txq->ift_no_tx_dma_setup++; break; default: txq->ift_no_tx_dma_setup++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; break; } txq->ift_map_failed++; DBG_COUNTER_INC(encap_load_mbuf_fail); return (err); } /* * XXX assumes a 1 to 1 relationship between segments and * descriptors - this does not hold true on all drivers, e.g. * cxgb */ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { txq->ift_no_desc_avail++; if (map != NULL) bus_dmamap_unload(desc_tag, map); DBG_COUNTER_INC(encap_txq_avail_fail); if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) GROUPTASK_ENQUEUE(&txq->ift_task); return (ENOBUFS); } /* * On Intel cards we can greatly reduce the number of TX interrupts * we see by only setting report status on every Nth descriptor. * However, this also means that the driver will need to keep track * of the descriptors that RS was set on to check them for the DD bit. */ txq->ift_rs_pending += nsegs + 1; if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs - 1) <= MAX_TX_DESC(ctx)) { pi.ipi_flags |= IPI_TX_INTR; txq->ift_rs_pending = 0; } pi.ipi_segs = segs; pi.ipi_nsegs = nsegs; MPASS(pidx >= 0 && pidx < txq->ift_size); #ifdef PKT_DEBUG print_pkt(&pi); #endif if (map != NULL) bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE); if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { if (map != NULL) bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); DBG_COUNTER_INC(tx_encap); MPASS(pi.ipi_new_pidx < txq->ift_size); ndesc = pi.ipi_new_pidx - pi.ipi_pidx; if (pi.ipi_new_pidx < pi.ipi_pidx) { ndesc += txq->ift_size; txq->ift_gen = 1; } /* * drivers can need as many as * two sentinels */ MPASS(ndesc <= pi.ipi_nsegs + 2); MPASS(pi.ipi_new_pidx != pidx); MPASS(ndesc > 0); txq->ift_in_use += ndesc; /* * We update the last software descriptor again here because there may * be a sentinel and/or there may be more mbufs than segments */ txq->ift_pidx = pi.ipi_new_pidx; txq->ift_npending += pi.ipi_ndescs; } else if (__predict_false(err == EFBIG && remap < 2)) { *m_headp = m_head = iflib_remove_mbuf(txq); remap = 1; txq->ift_txd_encap_efbig++; goto defrag; } else DBG_COUNTER_INC(encap_txd_encap_fail); return (err); defrag_failed: txq->ift_mbuf_defrag_failed++; txq->ift_map_failed++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; return (ENOMEM); } static void iflib_tx_desc_free(iflib_txq_t txq, int n) { int hasmap; uint32_t qsize, cidx, mask, gen; struct mbuf *m, **ifsd_m; uint8_t *ifsd_flags; bus_dmamap_t *ifsd_map; bool do_prefetch; cidx = txq->ift_cidx; gen = txq->ift_gen; qsize = txq->ift_size; mask = qsize-1; hasmap = txq->ift_sds.ifsd_map != NULL; ifsd_flags = txq->ift_sds.ifsd_flags; ifsd_m = txq->ift_sds.ifsd_m; ifsd_map = 
txq->ift_sds.ifsd_map; do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); while (n--) { if (do_prefetch) { prefetch(ifsd_m[(cidx + 3) & mask]); prefetch(ifsd_m[(cidx + 4) & mask]); } if (ifsd_m[cidx] != NULL) { prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]); if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) { /* * does it matter if it's not the TSO tag? If so we'll * have to add the type to flags */ bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; } if ((m = ifsd_m[cidx]) != NULL) { /* XXX we don't support any drivers that batch packets yet */ MPASS(m->m_nextpkt == NULL); /* if the number of clusters exceeds the number of segments * there won't be space on the ring to save a pointer to each * cluster so we simply free the list here */ if (m->m_flags & M_TOOBIG) { m_freem(m); } else { m_free(m); } ifsd_m[cidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif DBG_COUNTER_INC(tx_frees); } } if (__predict_false(++cidx == qsize)) { cidx = 0; gen = 0; } } txq->ift_cidx = cidx; txq->ift_gen = gen; } static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) { int reclaim; if_ctx_t ctx = txq->ift_ctx; KASSERT(thresh >= 0, ("invalid threshold to reclaim")); MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); /* * Need a rate-limiting check so that this isn't called every time */ iflib_tx_credits_update(ctx, txq); reclaim = DESC_RECLAIMABLE(txq); if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { #ifdef INVARIANTS if (iflib_verbose_debug) { printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, reclaim, thresh); } #endif return (0); } iflib_tx_desc_free(txq, reclaim); txq->ift_cleaned += reclaim; txq->ift_in_use -= reclaim; return (reclaim); } static struct mbuf ** _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) { int next, size; struct mbuf **items; size = r->size; next = (cidx + CACHE_PTR_INCREMENT) & (size-1); items = __DEVOLATILE(struct mbuf **, &r->items[0]); prefetch(items[(cidx + offset) & (size-1)]); if (remaining > 1) { prefetch2cachelines(&items[next]); prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); } return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); } static void iflib_txq_check_drain(iflib_txq_t txq, int budget) { ifmp_ring_check_drainage(txq->ift_br, budget); } static uint32_t iflib_txq_can_drain(struct ifmp_ring *r) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); } static uint32_t iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; struct ifnet *ifp = ctx->ifc_ifp; struct mbuf **mp, *m; int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail; int reclaimed, err, in_use_prev, desc_used; bool do_prefetch, ring, rang; if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(txq_drain_notready); return (0); } reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use); avail = IDXDIFF(pidx, cidx, r->size); 
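/*
 * Editor's note (illustrative sketch, assuming the conventional
 * IDXDIFF definition for a power-of-2 producer/consumer ring):
 *
 *	#define IDXDIFF(head, tail, wrap) \
 *	    ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
 *
 * e.g. with r->size == 4096, pidx == 5 and cidx == 4090 the producer
 * has wrapped past the consumer, so avail == 4096 - 4090 + 5 == 11
 * entries remain to be drained below.
 */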
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { DBG_COUNTER_INC(txq_drain_flushing); for (i = 0; i < avail; i++) { m_free(r->items[(cidx + i) & (r->size-1)]); r->items[(cidx + i) & (r->size-1)] = NULL; } return (avail); } if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); DBG_COUNTER_INC(txq_drain_oactive); return (0); } if (reclaimed) txq->ift_qstatus = IFLIB_QUEUE_IDLE; consumed = mcast_sent = bytes_sent = pkt_sent = 0; count = MIN(avail, TX_BATCH_SIZE); #ifdef INVARIANTS if (iflib_verbose_debug) printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, avail, ctx->ifc_flags, TXQ_AVAIL(txq)); #endif do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); avail = TXQ_AVAIL(txq); for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) { int rem = do_prefetch ? count - i : 0; mp = _ring_peek_one(r, cidx, i, rem); MPASS(mp != NULL && *mp != NULL); if (__predict_false(*mp == (struct mbuf *)txq)) { consumed++; reclaimed++; continue; } in_use_prev = txq->ift_in_use; err = iflib_encap(txq, mp); if (__predict_false(err)) { DBG_COUNTER_INC(txq_drain_encapfail); /* no room - bail out */ if (err == ENOBUFS) break; /* we can't send this packet - skip it */ consumed++; continue; } consumed++; pkt_sent++; m = *mp; DBG_COUNTER_INC(tx_sent); bytes_sent += m->m_pkthdr.len; mcast_sent += !!(m->m_flags & M_MCAST); avail = TXQ_AVAIL(txq); txq->ift_db_pending += (txq->ift_in_use - in_use_prev); desc_used += (txq->ift_in_use - in_use_prev); ETHER_BPF_MTAP(ifp, m); if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) break; rang = iflib_txd_db_check(ctx, txq, false, in_use_prev); } /* deliberate use of bitwise or to avoid gratuitous short-circuit */ ring = rang ?
false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); if (mcast_sent) if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); #ifdef INVARIANTS if (iflib_verbose_debug) printf("consumed=%d\n", consumed); #endif return (consumed); } static uint32_t iflib_txq_drain_always(struct ifmp_ring *r) { return (1); } static uint32_t iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { int i, avail; struct mbuf **mp; iflib_txq_t txq; txq = r->cookie; txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); avail = IDXDIFF(pidx, cidx, r->size); for (i = 0; i < avail; i++) { mp = _ring_peek_one(r, cidx, i, avail - i); if (__predict_false(*mp == (struct mbuf *)txq)) continue; m_freem(*mp); } MPASS(ifmp_ring_is_stalled(r) == 0); return (avail); } static void iflib_ifmp_purge(iflib_txq_t txq) { struct ifmp_ring *r; r = txq->ift_br; r->drain = iflib_txq_drain_free; r->can_drain = iflib_txq_drain_always; ifmp_ring_check_drainage(r, r->size); r->drain = iflib_txq_drain; r->can_drain = iflib_txq_can_drain; } static void _task_fn_tx(void *context) { iflib_txq_t txq = context; if_ctx_t ctx = txq->ift_ctx; struct ifnet *ifp = ctx->ifc_ifp; int rc; #ifdef IFLIB_DIAGNOSTICS txq->ift_cpu_exec_count[curcpu]++; #endif if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; if (if_getcapenable(ifp) & IFCAP_NETMAP) { if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)) netmap_tx_irq(ifp, txq->ift_id); IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); return; } if (txq->ift_db_pending) ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE); ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else { rc = IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); } } static void _task_fn_rx(void *context) { iflib_rxq_t rxq = context; if_ctx_t ctx = rxq->ifr_ctx; bool more; int rc; uint16_t budget; #ifdef IFLIB_DIAGNOSTICS rxq->ifr_cpu_exec_count[curcpu]++; #endif DBG_COUNTER_INC(task_fn_rxs); if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; more = true; #ifdef DEV_NETMAP if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) { u_int work = 0; if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) { more = false; } } #endif budget = ctx->ifc_sysctl_rx_budget; if (budget == 0) budget = 16; /* XXX */ if (more == false || (more = iflib_rxeof(rxq, budget)) == false) { if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else { DBG_COUNTER_INC(rx_intr_enables); rc = IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); } } if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; if (more) GROUPTASK_ENQUEUE(&rxq->ifr_task); } static void _task_fn_admin(void *context) { if_ctx_t ctx = context; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; iflib_txq_t txq; int i; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) { if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { return; } } CTX_LOCK(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); } IFDI_UPDATE_ADMIN_STATUS(ctx); for (txq = 
ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); IFDI_LINK_INTR_ENABLE(ctx); if (ctx->ifc_flags & IFC_DO_RESET) { ctx->ifc_flags &= ~IFC_DO_RESET; iflib_if_init_locked(ctx); } CTX_UNLOCK(ctx); if (LINK_ACTIVE(ctx) == 0) return; for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); } static void _task_fn_iov(void *context) { if_ctx_t ctx = context; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; CTX_LOCK(ctx); IFDI_VFLR_HANDLE(ctx); CTX_UNLOCK(ctx); } static int iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { int err; if_int_delay_info_t info; if_ctx_t ctx; info = (if_int_delay_info_t)arg1; ctx = info->iidi_ctx; info->iidi_req = req; info->iidi_oidp = oidp; CTX_LOCK(ctx); err = IFDI_SYSCTL_INT_DELAY(ctx, info); CTX_UNLOCK(ctx); return (err); } /********************************************************************* * * IFNET FUNCTIONS * **********************************************************************/ static void iflib_if_init_locked(if_ctx_t ctx) { iflib_stop(ctx); iflib_init_locked(ctx); } static void iflib_if_init(void *arg) { if_ctx_t ctx = arg; CTX_LOCK(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static int iflib_if_transmit(if_t ifp, struct mbuf *m) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq; int err, qidx; if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(tx_frees); m_freem(m); return (ENOBUFS); } MPASS(m->m_nextpkt == NULL); qidx = 0; if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m)) qidx = QIDX(ctx, m); /* * XXX calculate buf_ring based on flowid (divvy up bits?) */ txq = &ctx->ifc_txqs[qidx]; #ifdef DRIVER_BACKPRESSURE if (txq->ift_closed) { while (m != NULL) { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); m = next; } return (ENOBUFS); } #endif #ifdef notyet qidx = count = 0; mp = marr; next = m; do { count++; next = next->m_nextpkt; } while (next != NULL); if (count > nitems(marr)) if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { /* XXX check nextpkt */ m_freem(m); /* XXX simplify for now */ DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } for (next = m, i = 0; next != NULL; i++) { mp[i] = next; next = next->m_nextpkt; mp[i]->m_nextpkt = NULL; } #endif DBG_COUNTER_INC(tx_seen); err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE); GROUPTASK_ENQUEUE(&txq->ift_task); if (err) { /* support forthcoming later */ #ifdef DRIVER_BACKPRESSURE txq->ift_closed = TRUE; #endif ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); m_freem(m); } return (err); } static void iflib_if_qflush(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq = ctx->ifc_txqs; int i; CTX_LOCK(ctx); ctx->ifc_flags |= IFC_QFLUSH; CTX_UNLOCK(ctx); for (i = 0; i < NTXQSETS(ctx); i++, txq++) while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) iflib_txq_check_drain(txq, 0); CTX_LOCK(ctx); ctx->ifc_flags &= ~IFC_QFLUSH; CTX_UNLOCK(ctx); if_qflush(ifp); } #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO) static int iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) { if_ctx_t ctx = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool 
avoid_reset = FALSE; int err = 0, reinit = 0, bits; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { if_setflagbits(ifp, IFF_UP,0); if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING)) reinit = 1; #ifdef INET if (!(if_getflags(ifp) & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else err = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: CTX_LOCK(ctx); if (ifr->ifr_mtu == if_getmtu(ifp)) { CTX_UNLOCK(ctx); break; } bits = if_getdrvflags(ifp); /* stop the driver and free any clusters before proceeding */ iflib_stop(ctx); if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) ctx->ifc_flags |= IFC_MULTISEG; else ctx->ifc_flags &= ~IFC_MULTISEG; err = if_setmtu(ifp, ifr->ifr_mtu); } iflib_init_locked(ctx); if_setdrvflags(ifp, bits); CTX_UNLOCK(ctx); break; case SIOCSIFFLAGS: CTX_LOCK(ctx); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); } } else reinit = 1; } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { iflib_stop(ctx); } ctx->ifc_if_flags = if_getflags(ifp); CTX_UNLOCK(ctx); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); IFDI_MULTI_SET(ctx); IFDI_INTR_ENABLE(ctx); CTX_UNLOCK(ctx); } break; case SIOCSIFMEDIA: CTX_LOCK(ctx); IFDI_MEDIA_SET(ctx); CTX_UNLOCK(ctx); /* falls thru */ case SIOCGIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command); break; case SIOCGI2C: { struct ifi2creq i2c; err = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); if (err != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { err = EINVAL; break; } if (i2c.len > sizeof(i2c.data)) { err = EINVAL; break; } if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) err = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); break; } case SIOCSIFCAP: { int mask, setmask; mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); setmask = 0; #ifdef TCP_OFFLOAD setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); #endif setmask |= (mask & IFCAP_FLAGS); if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); if ((mask & IFCAP_WOL) && (if_getcapabilities(ifp) & IFCAP_WOL) != 0) setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC)); if_vlancap(ifp); /* * want to ensure that traffic has stopped before we change any of the flags */ if (setmask) { CTX_LOCK(ctx); bits = if_getdrvflags(ifp); if (bits & IFF_DRV_RUNNING) iflib_stop(ctx); if_togglecapenable(ifp, setmask); if (bits & IFF_DRV_RUNNING) iflib_init_locked(ctx); if_setdrvflags(ifp, bits); CTX_UNLOCK(ctx); } break; } case SIOCGPRIVATE_0: case SIOCSDRVSPEC: case SIOCGDRVSPEC: CTX_LOCK(ctx); err = IFDI_PRIV_IOCTL(ctx, command, data); CTX_UNLOCK(ctx); break; default: err = ether_ioctl(ifp, command, data); break; } if (reinit) iflib_if_init(ctx); return (err); } static uint64_t iflib_if_get_counter(if_t ifp, ift_counter cnt) { if_ctx_t ctx = if_getsoftc(ifp); return (IFDI_GET_COUNTER(ctx, cnt)); } /********************************************************************* * * OTHER FUNCTIONS EXPORTED TO THE STACK * **********************************************************************/ static void iflib_vlan_register(void *arg, if_t ifp, uint16_t 
vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); IFDI_VLAN_REGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); IFDI_VLAN_UNREGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_led_func(void *arg, int onoff) { if_ctx_t ctx = arg; CTX_LOCK(ctx); IFDI_LED_FUNC(ctx, onoff); CTX_UNLOCK(ctx); } /********************************************************************* * * BUS FUNCTION DEFINITIONS * **********************************************************************/ int iflib_device_probe(device_t dev) { pci_vendor_info_t *ent; uint16_t pci_vendor_id, pci_device_id; uint16_t pci_subvendor_id, pci_subdevice_id; uint16_t pci_rev_id; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); pci_rev_id = pci_get_revid(dev); if (sctx->isc_parse_devinfo != NULL) sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); ent = sctx->isc_vendor_info; while (ent->pvi_vendor_id != 0) { if (pci_vendor_id != ent->pvi_vendor_id) { ent++; continue; } if ((pci_device_id == ent->pvi_device_id) && ((pci_subvendor_id == ent->pvi_subvendor_id) || (ent->pvi_subvendor_id == 0)) && ((pci_subdevice_id == ent->pvi_subdevice_id) || (ent->pvi_subdevice_id == 0)) && ((pci_rev_id == ent->pvi_rev_id) || (ent->pvi_rev_id == 0))) { device_set_desc_copy(dev, ent->pvi_name); /* this needs to be changed to zero if the bus probing code * ever stops re-probing on best match because the sctx * may have its values over written by register calls * in subsequent probes */ return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } int iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) { int err, rid, msix, msix_bar; if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; int i; uint16_t main_txq; uint16_t main_rxq; ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); if (sc == NULL) { sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); device_set_softc(dev, ctx); ctx->ifc_flags |= IFC_SC_ALLOCATED; } ctx->ifc_sctx = sctx; ctx->ifc_dev = dev; ctx->ifc_softc = sc; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "iflib_register failed %d\n", err); return (err); } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; /* * XXX sanity check that ntxd & nrxd are a power of 2 */ if (ctx->ifc_sysctl_ntxqs != 0) scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; if (ctx->ifc_sysctl_nrxqs != 0) scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; for (i = 0; i < sctx->isc_ntxqs; i++) { if (ctx->ifc_sysctl_ntxds[i] != 0) scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; else scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (ctx->ifc_sysctl_nrxds[i] != 0) scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; else scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (scctx->isc_nrxd[i] < 
sctx->isc_nrxd_min[i]) { device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; } if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; } if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; } } if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); return (err); } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; #ifdef INVARIANTS MPASS(scctx->isc_capenable); if (scctx->isc_capenable & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capenable | IFCAP_HWSTATS); if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS); if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; #ifdef ACPI_DMAR if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL) ctx->ifc_flags |= IFC_DMAR; #elif !(defined(__i386__) || defined(__amd64__)) /* set unconditionally for !x86 */ ctx->ifc_flags |= IFC_DMAR; #endif msix_bar = scctx->isc_msix_bar; main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; /* XXX change for per-queue sizes */ device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); for (i = 0; i < sctx->isc_nrxqs; i++) { if (!powerof2(scctx->isc_nrxd[i])) { /* round down instead? 
*/ device_printf(dev, "# rx descriptors must be a power of 2\n"); err = EINVAL; goto fail; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (!powerof2(scctx->isc_ntxd[i])) { device_printf(dev, "# tx descriptors must be a power of 2"); err = EINVAL; goto fail; } } if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); /* * Protect the stack against modern hardware */ if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX) scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX; /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max; ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max; ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max; if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); /* ** Now setup MSI or MSI/X, should ** return us the number of supported ** vectors. (Will be 1 for MSI) */ if (sctx->isc_flags & IFLIB_SKIP_MSIX) { msix = scctx->isc_vectors; } else if (scctx->isc_msix_bar != 0) /* * The simple fact that isc_msix_bar is not 0 does not mean we * we have a good value there that is known to work. */ msix = iflib_msix_init(ctx); else { scctx->isc_vectors = 1; scctx->isc_ntxqsets = 1; scctx->isc_nrxqsets = 1; scctx->isc_intr = IFLIB_INTR_LEGACY; msix = 0; } /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); goto fail; } if ((err = iflib_qset_structures_setup(ctx))) { device_printf(dev, "qset structure setup failed %d\n", err); goto fail_queues; } /* * Group taskqueues aren't properly set up until SMP is started, * so we disable interrupts until we can handle them post * SI_SUB_SMP. * * XXX: disabling interrupts doesn't actually work, at least for * the non-MSI case. When they occur before SI_SUB_SMP completes, * we do null handling and depend on this not causing too large an * interrupt storm. 
*/ IFDI_INTR_DISABLE(ctx); if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) { device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err); goto fail_intr_free; } if (msix <= 1) { rid = 0; if (scctx->isc_intr == IFLIB_INTR_MSI) { MPASS(msix == 1); rid = 1; } if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { device_printf(dev, "iflib_legacy_setup failed %d\n", err); goto fail_intr_free; } } ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } if ((err = iflib_netmap_attach(ctx))) { device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); goto fail_detach; } *ctxp = ctx; if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; return (0); fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_intr_free: if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI) pci_release_msi(ctx->ifc_dev); fail_queues: /* XXX free queues */ fail: IFDI_DETACH(ctx); return (err); } int iflib_device_attach(device_t dev) { if_ctx_t ctx; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_enable_busmaster(dev); return (iflib_device_register(dev, NULL, sctx, &ctx)); } int iflib_device_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq; iflib_rxq_t rxq; device_t dev = ctx->ifc_dev; int i, j; struct taskqgroup *tqg; iflib_fl_t fl; /* Make sure VLANs are not using the driver */ if (if_vlantrunkinuse(ifp)) { device_printf(dev, "VLAN in use, detach first\n"); return (EBUSY); } CTX_LOCK(ctx); ctx->ifc_in_detach = 1; iflib_stop(ctx); CTX_UNLOCK(ctx); /* Unregister VLAN events */ if (ctx->ifc_vlan_attach_event != NULL) EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); if (ctx->ifc_vlan_detach_event != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); iflib_netmap_detach(ifp); ether_ifdetach(ifp); /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ CTX_LOCK_DESTROY(ctx); if (ctx->ifc_led_dev != NULL) led_destroy(ctx->ifc_led_dev); /* XXX drain any dependent tasks */ tqg = qgroup_if_io_tqg; for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { callout_drain(&txq->ift_timer); if (txq->ift_task.gt_uniq != NULL) taskqgroup_detach(tqg, &txq->ift_task); } for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { if (rxq->ifr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &rxq->ifr_task); for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) free(fl->ifl_rx_bitmap, M_IFLIB); } tqg = qgroup_if_config_tqg; if (ctx->ifc_admin_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_admin_task); if (ctx->ifc_vflr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_vflr_task); IFDI_DETACH(ctx); device_set_softc(ctx->ifc_dev, NULL); if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { pci_release_msi(dev); } if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { iflib_irq_free(ctx, &ctx->ifc_legacy_irq); } if (ctx->ifc_msix_mem != NULL) { bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } bus_generic_detach(dev); if_free(ifp); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (0); } int iflib_device_detach(device_t dev) { if_ctx_t ctx
= device_get_softc(dev); return (iflib_device_deregister(ctx)); } int iflib_device_suspend(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SUSPEND(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_shutdown(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SHUTDOWN(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_resume(device_t dev) { if_ctx_t ctx = device_get_softc(dev); iflib_txq_t txq = ctx->ifc_txqs; CTX_LOCK(ctx); IFDI_RESUME(ctx); iflib_init_locked(ctx); CTX_UNLOCK(ctx); for (int i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); return (bus_generic_resume(dev)); } int iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_INIT(ctx, num_vfs, params); CTX_UNLOCK(ctx); return (error); } void iflib_device_iov_uninit(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_IOV_UNINIT(ctx); CTX_UNLOCK(ctx); } int iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_VF_ADD(ctx, vfnum, params); CTX_UNLOCK(ctx); return (error); } /********************************************************************* * * MODULE FUNCTION DEFINITIONS * **********************************************************************/ /* * - Start a fast taskqueue thread for each core * - Start a taskqueue for control operations */ static int iflib_module_init(void) { return (0); } static int iflib_module_event_handler(module_t mod, int what, void *arg) { int err; switch (what) { case MOD_LOAD: if ((err = iflib_module_init()) != 0) return (err); break; case MOD_UNLOAD: return (EBUSY); default: return (EOPNOTSUPP); } return (0); } /********************************************************************* * * PUBLIC FUNCTION DEFINITIONS * ordered as in iflib.h * **********************************************************************/ static void _iflib_assert(if_shared_ctx_t sctx) { MPASS(sctx->isc_tx_maxsize); MPASS(sctx->isc_tx_maxsegsize); MPASS(sctx->isc_rx_maxsize); MPASS(sctx->isc_rx_nsegments); MPASS(sctx->isc_rx_maxsegsize); MPASS(sctx->isc_nrxd_min[0]); MPASS(sctx->isc_nrxd_max[0]); MPASS(sctx->isc_nrxd_default[0]); MPASS(sctx->isc_ntxd_min[0]); MPASS(sctx->isc_ntxd_max[0]); MPASS(sctx->isc_ntxd_default[0]); } static void _iflib_pre_assert(if_softc_ctx_t scctx) { MPASS(scctx->isc_txrx->ift_txd_encap); MPASS(scctx->isc_txrx->ift_txd_flush); MPASS(scctx->isc_txrx->ift_txd_credits_update); MPASS(scctx->isc_txrx->ift_rxd_available); MPASS(scctx->isc_txrx->ift_rxd_pkt_get); MPASS(scctx->isc_txrx->ift_rxd_refill); MPASS(scctx->isc_txrx->ift_rxd_flush); } static int iflib_register(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; driver_t *driver = sctx->isc_driver; device_t dev = ctx->ifc_dev; if_t ifp; _iflib_assert(sctx); CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (ENOMEM); } /* * Initialize our context's device specific methods */ kobj_init((kobj_t) ctx, (kobj_class_t) driver); kobj_class_compile((kobj_class_t) driver); driver->refs++; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setsoftc(ifp, ctx); if_setdev(ifp, dev); if_setinitfn(ifp, iflib_if_init); if_setioctlfn(ifp, iflib_if_ioctl); 
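/*
 * iflib interposes its own ifnet methods: if_init and if_ioctl are
 * installed here, and if_transmit/if_qflush just below.  The driver
 * itself never installs ifnet handlers; it supplies only the ift_*
 * descriptor callbacks validated by _iflib_pre_assert() above.
 */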
if_settransmitfn(ifp, iflib_if_transmit); if_setqflushfn(ifp, iflib_if_qflush); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); ctx->ifc_vlan_attach_event = EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, EVENTHANDLER_PRI_FIRST); ctx->ifc_vlan_detach_event = EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, EVENTHANDLER_PRI_FIRST); ifmedia_init(&ctx->ifc_media, IFM_IMASK, iflib_media_change, iflib_media_status); return (0); } static int iflib_queues_alloc(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int nrxqsets = scctx->isc_nrxqsets; int ntxqsets = scctx->isc_ntxqsets; iflib_txq_t txq; iflib_rxq_t rxq; iflib_fl_t fl = NULL; int i, j, cpu, err, txconf, rxconf; iflib_dma_info_t ifdip; uint32_t *rxqsizes = scctx->isc_rxqsizes; uint32_t *txqsizes = scctx->isc_txqsizes; uint8_t nrxqs = sctx->isc_nrxqs; uint8_t ntxqs = sctx->isc_ntxqs; int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1; caddr_t *vaddrs; uint64_t *paddrs; struct ifmp_ring **brscp; KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); brscp = NULL; txq = NULL; rxq = NULL; /* Allocate the TX ring struct memory */ if (!(txq = (iflib_txq_t) malloc(sizeof(struct iflib_txq) * ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); err = ENOMEM; goto fail; } /* Now allocate the RX */ if (!(rxq = (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); err = ENOMEM; goto rx_fail; } ctx->ifc_txqs = txq; ctx->ifc_rxqs = rxq; /* * XXX handle allocation failure */ for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { device_printf(dev, "failed to allocate iflib_dma_info\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_ifdi = ifdip; for (j = 0; j < ntxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate Descriptor memory\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_txd_size[j] = scctx->isc_txd_size[j]; bzero((void *)ifdip->idi_vaddr, txqsizes[j]); } txq->ift_ctx = ctx; txq->ift_id = i; if (sctx->isc_flags & IFLIB_HAS_TXCQ) { txq->ift_br_offset = 1; } else { txq->ift_br_offset = 0; } /* XXX fix this */ txq->ift_timer.c_cpu = cpu; if (iflib_txsd_alloc(txq)) { device_printf(dev, "Critical Failure setting up TX buffers\n"); err = ENOMEM; goto err_tx_desc; } /* Initialize the TX lock */ snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout", device_get_nameunit(dev), txq->ift_id); mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db", device_get_nameunit(dev), txq->ift_id); err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, iflib_txq_can_drain, M_IFLIB, M_WAITOK); if (err) { /* XXX free any allocated rings */ device_printf(dev, "Unable to allocate buf_ring\n"); goto err_tx_desc; } } for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { device_printf(dev, "failed to allocate iflib_dma_info\n"); err = ENOMEM; 
goto err_tx_desc; } rxq->ifr_ifdi = ifdip; /* XXX this needs to be changed if #rx queues != #tx queues */ rxq->ifr_ntxqirq = 1; rxq->ifr_txqid[0] = i; for (j = 0; j < nrxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate Descriptor memory\n"); err = ENOMEM; goto err_tx_desc; } bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); } rxq->ifr_ctx = ctx; rxq->ifr_id = i; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { rxq->ifr_fl_offset = 1; } else { rxq->ifr_fl_offset = 0; } rxq->ifr_nfl = nfree_lists; if (!(fl = (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate free list memory\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_fl = fl; for (j = 0; j < nfree_lists; j++) { fl[j].ifl_rxq = rxq; fl[j].ifl_id = j; fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; } /* Allocate receive buffers for the ring*/ if (iflib_rxsd_alloc(rxq)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); err = ENOMEM; goto err_rx_desc; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO); } /* TXQs */ vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); for (i = 0; i < ntxqsets; i++) { iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; for (j = 0; j < ntxqs; j++, di++) { vaddrs[i*ntxqs + j] = di->idi_vaddr; paddrs[i*ntxqs + j] = di->idi_paddr; } } if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { device_printf(ctx->ifc_dev, "device queue allocation failed\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); /* RXQs */ vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); for (i = 0; i < nrxqsets; i++) { iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; for (j = 0; j < nrxqs; j++, di++) { vaddrs[i*nrxqs + j] = di->idi_vaddr; paddrs[i*nrxqs + j] = di->idi_paddr; } } if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { device_printf(ctx->ifc_dev, "device queue allocation failed\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); return (0); /* XXX handle allocation failure changes */ err_rx_desc: err_tx_desc: if (ctx->ifc_rxqs != NULL) free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; if (ctx->ifc_txqs != NULL) free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; rx_fail: if (brscp != NULL) free(brscp, M_IFLIB); if (rxq != NULL) free(rxq, M_IFLIB); if (txq != NULL) free(txq, M_IFLIB); fail: return (err); } static int iflib_tx_structures_setup(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i; for (i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_setup(txq); return (0); } static void iflib_tx_structures_free(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i, j; for (i = 0; i < NTXQSETS(ctx); i++, txq++) { iflib_txq_destroy(txq); for (j = 0; j < ctx->ifc_nhwtxqs; j++) iflib_dma_free(&txq->ift_ifdi[j]); } free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; IFDI_QUEUES_FREE(ctx); } /********************************************************************* * * Initialize all receive rings. 
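 *
 * For each receive queue set this (re)initializes LRO state when INET
 * or INET6 is configured and then invokes the driver's IFDI_RXQ_SETUP()
 * method; on LRO failure it unwinds the queues initialized so far.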
* **********************************************************************/ static int iflib_rx_structures_setup(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; int q; #if defined(INET6) || defined(INET) int i, err; #endif for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { #if defined(INET6) || defined(INET) tcp_lro_free(&rxq->ifr_lc); if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, TCP_LRO_ENTRIES, min(1024, ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) { device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); goto fail; } rxq->ifr_lro_enabled = TRUE; #endif IFDI_RXQ_SETUP(ctx, rxq->ifr_id); } return (0); #if defined(INET6) || defined(INET) fail: /* * Free the RX software descriptors allocated so far; we only handle * the rings that completed, since the failing ring will have * cleaned up for itself. 'q' failed, so it's the terminus. */ rxq = ctx->ifc_rxqs; for (i = 0; i < q; ++i, rxq++) { iflib_rx_sds_free(rxq); rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; } return (err); #endif } /********************************************************************* * * Free all receive rings. * **********************************************************************/ static void iflib_rx_structures_free(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { iflib_rx_sds_free(rxq); } } static int iflib_qset_structures_setup(if_ctx_t ctx) { int err; if ((err = iflib_tx_structures_setup(ctx)) != 0) return (err); if ((err = iflib_rx_structures_setup(ctx)) != 0) { device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); } return (err); } int iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name) { return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); } static int find_nth(if_ctx_t ctx, cpuset_t *cpus, int qid) { int i, cpuid, eqid, count; CPU_COPY(&ctx->ifc_cpus, cpus); count = CPU_COUNT(&ctx->ifc_cpus); eqid = qid % count; /* clear up to the qid'th bit */ for (i = 0; i < eqid; i++) { cpuid = CPU_FFS(cpus); MPASS(cpuid != 0); CPU_CLR(cpuid-1, cpus); } cpuid = CPU_FFS(cpus); MPASS(cpuid != 0); return (cpuid-1); } int iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, iflib_intr_type_t type, driver_filter_t *filter, void *filter_arg, int qid, char *name) { struct grouptask *gtask; struct taskqgroup *tqg; iflib_filter_info_t info; cpuset_t cpus; gtask_fn_t *fn; int tqrid, err, cpuid; driver_filter_t *intr_fast; void *q; info = &ctx->ifc_filter_info; tqrid = rid; switch (type) { /* XXX merge tx/rx for netmap?
*/ case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; info = &ctx->ifc_txqs[qid].ift_filter_info; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_RXTX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr_rxtx; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_ADMIN: q = ctx; tqrid = -1; info = &ctx->ifc_filter_info; gtask = &ctx->ifc_admin_task; tqg = qgroup_if_config_tqg; fn = _task_fn_admin; intr_fast = iflib_fast_intr_ctx; break; default: panic("unknown net intr type"); } info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = q; err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); if (err != 0) { device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err); return (err); } if (type == IFLIB_INTR_ADMIN) return (0); if (tqrid != -1) { cpuid = find_nth(ctx, &cpus, qid); taskqgroup_attach_cpu(tqg, gtask, q, cpuid, rman_get_start(irq->ii_res), name); } else { taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); } return (0); } void iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, char *name) { struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; void *q; int irq_num = -1; switch (type) { case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; if (irq != NULL) irq_num = rman_get_start(irq->ii_res); break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; if (irq != NULL) irq_num = rman_get_start(irq->ii_res); break; case IFLIB_INTR_IOV: q = ctx; gtask = &ctx->ifc_vflr_task; tqg = qgroup_if_config_tqg; fn = _task_fn_iov; break; default: panic("unknown net intr type"); } GROUPTASK_INIT(gtask, 0, fn, q); taskqgroup_attach(tqg, gtask, q, irq_num, name); } void iflib_irq_free(if_ctx_t ctx, if_irq_t irq) { if (irq->ii_tag) bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); if (irq->ii_res) bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res); } static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_irq_t irq = &ctx->ifc_legacy_irq; iflib_filter_info_t info; struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; int tqrid; void *q; int err; q = &ctx->ifc_rxqs[0]; info = &rxq[0].ifr_filter_info; gtask = &rxq[0].ifr_task; tqg = qgroup_if_io_tqg; tqrid = irq->ii_rid = *rid; fn = _task_fn_rx; ctx->ifc_flags |= IFC_LEGACY; info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = ctx; /* We allocate a single interrupt resource */ if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0) return (err); GROUPTASK_INIT(gtask, 0, fn, q); taskqgroup_attach(tqg, gtask, q, tqrid, name); GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, tqrid, "tx"); return (0); } 
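/*
 * A minimal sketch of how the allocators above are typically used.
 * The foo(4) driver and all foo_* names are hypothetical (illustrative
 * only, not part of iflib): a driver's IFDI_MSIX_INTR_ASSIGN method
 * usually requests one fast interrupt per receive queue, a deferred
 * grouptask per transmit queue, and one admin vector.
 *
 *	static int
 *	foo_if_msix_intr_assign(if_ctx_t ctx, int msix)
 *	{
 *		struct foo_softc *sc = iflib_get_softc(ctx);
 *		int i, rid = 1, err;
 *		char name[16];
 *
 *		for (i = 0; i < sc->num_rx_queues; i++, rid++) {
 *			snprintf(name, sizeof(name), "rxq%d", i);
 *			err = iflib_irq_alloc_generic(ctx, &sc->rx_irq[i],
 *			    rid, IFLIB_INTR_RX, foo_msix_que,
 *			    &sc->rx_queues[i], i, name);
 *			if (err != 0)
 *				return (err);
 *		}
 *		for (i = 0; i < sc->num_tx_queues; i++) {
 *			snprintf(name, sizeof(name), "txq%d", i);
 *			iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX,
 *			    &sc->tx_queues[i], i, name);
 *		}
 *		return (iflib_irq_alloc_generic(ctx, &sc->admin_irq, rid,
 *		    IFLIB_INTR_ADMIN, foo_msix_admin, sc, 0, "admin"));
 *	}
 */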
void iflib_led_create(if_ctx_t ctx) { ctx->ifc_led_dev = led_create(iflib_led_func, ctx, device_get_nameunit(ctx->ifc_dev)); } void iflib_tx_intr_deferred(if_ctx_t ctx, int txqid) { GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); } void iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid) { GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); } void iflib_admin_intr_deferred(if_ctx_t ctx) { #ifdef INVARIANTS struct grouptask *gtask; gtask = &ctx->ifc_admin_task; MPASS(gtask != NULL && gtask->gt_taskqueue != NULL); #endif GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); } void iflib_iov_intr_deferred(if_ctx_t ctx) { GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); } void iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) { taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); } void iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn, char *name) { GROUPTASK_INIT(gtask, 0, fn, ctx); taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); } void iflib_config_gtask_deinit(struct grouptask *gtask) { taskqgroup_detach(qgroup_if_config_tqg, gtask); } void iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq = ctx->ifc_txqs; if_setbaudrate(ifp, baudrate); if (baudrate >= IF_Gbps(10)) ctx->ifc_flags |= IFC_PREFETCH; /* If link down, disable watchdog */ if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) txq->ift_qstatus = IFLIB_QUEUE_IDLE; } ctx->ifc_link_state = link_state; if_link_state_change(ifp, link_state); } static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) { int credits; #ifdef INVARIANTS int credits_pre = txq->ift_cidx_processed; #endif if (ctx->isc_txd_credits_update == NULL) return (0); if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) return (0); txq->ift_processed += credits; txq->ift_cidx_processed += credits; MPASS(credits_pre + credits == txq->ift_cidx_processed); if (txq->ift_cidx_processed >= txq->ift_size) txq->ift_cidx_processed -= txq->ift_size; return (credits); } static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget) { return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, budget)); } void iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, const char *description, if_int_delay_info_t info, int offset, int value) { info->iidi_ctx = ctx; info->iidi_offset = offset; info->iidi_value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, iflib_sysctl_int_delay, "I", description); } struct mtx * iflib_ctx_lock_get(if_ctx_t ctx) { return (&ctx->ifc_mtx); } static int iflib_msix_init(if_ctx_t ctx) { device_t dev = ctx->ifc_dev; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs; int iflib_num_tx_queues, iflib_num_rx_queues; int err, admincnt, bar; iflib_num_tx_queues = scctx->isc_ntxqsets; iflib_num_rx_queues = scctx->isc_nrxqsets; device_printf(dev, "msix_init qsets capped at %d\n", iflib_num_tx_queues); bar = ctx->ifc_softc_ctx.isc_msix_bar; admincnt = sctx->isc_admin_intrcnt; /* Override by global tuneable */ { int i; size_t len = sizeof(i); err = kernel_sysctlbyname(curthread, "hw.pci.enable_msix", &i, &len, NULL, 0, NULL, 0); if (err == 0) { if (i == 0) goto msi; } else { 
device_printf(dev, "unable to read hw.pci.enable_msix."); } } /* Override by tuneable */ if (scctx->isc_disable_msix) goto msi; /* ** When used in a virtualized environment ** PCI BUSMASTER capability may not be set ** so explicity set it here and rewrite ** the ENABLE in the MSIX control register ** at this point to cause the host to ** successfully initialize us. */ { int msix_ctrl, rid; pci_enable_busmaster(dev); rid = 0; if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0 && rid != 0) { rid += PCIR_MSIX_CTRL; msix_ctrl = pci_read_config(dev, rid, 2); msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(dev, rid, msix_ctrl, 2); } else { device_printf(dev, "PCIY_MSIX capability not found; " "or rid %d == 0.\n", rid); goto msi; } } /* * bar == -1 => "trust me I know what I'm doing" * Some drivers are for hardware that is so shoddily * documented that no one knows which bars are which * so the developer has to map all bars. This hack * allows shoddy garbage to use msix in this framework. */ if (bar != -1) { ctx->ifc_msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE); if (ctx->ifc_msix_mem == NULL) { /* May not be enabled */ device_printf(dev, "Unable to map MSIX table \n"); goto msi; } } /* First try MSI/X */ if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */ device_printf(dev, "System has MSIX disabled \n"); bus_release_resource(dev, SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; goto msi; } #if IFLIB_DEBUG /* use only 1 qset in debug mode */ queuemsgs = min(msgs - admincnt, 1); #else queuemsgs = msgs - admincnt; #endif if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) == 0) { #ifdef RSS queues = imin(queuemsgs, rss_getnumbuckets()); #else queues = queuemsgs; #endif queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n", CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); } else { device_printf(dev, "Unable to fetch CPU list\n"); /* Figure out a reasonable auto config value */ queues = min(queuemsgs, mp_ncpus); } #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) rx_queues = iflib_num_rx_queues; else rx_queues = queues; /* * We want this to be all logical CPUs by default */ if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) tx_queues = iflib_num_tx_queues; else tx_queues = mp_ncpus; if (ctx->ifc_sysctl_qs_eq_override == 0) { #ifdef INVARIANTS if (tx_queues != rx_queues) device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", min(rx_queues, tx_queues), min(rx_queues, tx_queues)); #endif tx_queues = min(rx_queues, tx_queues); rx_queues = min(rx_queues, tx_queues); } device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); vectors = rx_queues + admincnt; if ((err = pci_alloc_msix(dev, &vectors)) == 0) { device_printf(dev, "Using MSIX interrupts with %d vectors\n", vectors); scctx->isc_vectors = vectors; scctx->isc_nrxqsets = rx_queues; scctx->isc_ntxqsets = tx_queues; scctx->isc_intr = IFLIB_INTR_MSIX; return (vectors); } else { device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); } msi: vectors = pci_msi_count(dev); scctx->isc_nrxqsets = 1; scctx->isc_ntxqsets = 1; scctx->isc_vectors = vectors; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { device_printf(dev,"Using 
an MSI interrupt\n"); scctx->isc_intr = IFLIB_INTR_MSI; } else { device_printf(dev,"Using a Legacy interrupt\n"); scctx->isc_intr = IFLIB_INTR_LEGACY; } return (vectors); } char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; static int mp_ring_state_handler(SYSCTL_HANDLER_ARGS) { int rc; uint16_t *state = ((uint16_t *)oidp->oid_arg1); struct sbuf *sb; char *ring_state = "UNKNOWN"; /* XXX needed ? */ rc = sysctl_wire_old_buffer(req, 0); MPASS(rc == 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 80, req); MPASS(sb != NULL); if (sb == NULL) return (ENOMEM); if (state[3] <= 3) ring_state = ring_states[state[3]]; sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", state[0], state[1], state[2], ring_state); rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } enum iflib_ndesc_handler { IFLIB_NTXD_HANDLER, IFLIB_NRXD_HANDLER, }; static int mp_ndesc_handler(SYSCTL_HANDLER_ARGS) { if_ctx_t ctx = (void *)arg1; enum iflib_ndesc_handler type = arg2; char buf[256] = {0}; qidx_t *ndesc; char *p, *next; int nqs, rc, i; MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER); nqs = 8; switch(type) { case IFLIB_NTXD_HANDLER: ndesc = ctx->ifc_sysctl_ntxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_ntxqs; break; case IFLIB_NRXD_HANDLER: ndesc = ctx->ifc_sysctl_nrxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_nrxqs; break; } if (nqs == 0) nqs = 8; for (i=0; i<8; i++) { if (i >= nqs) break; if (i) strcat(buf, ","); sprintf(strchr(buf, 0), "%d", ndesc[i]); } rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; i++, p = strsep(&next, " ,")) { ndesc[i] = strtoul(p, NULL, 10); } return(rc); } #define NAME_BUFLEN 32 static void iflib_add_device_sysctl_pre(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child, *oid_list; struct sysctl_ctx_list *ctx_list; struct sysctl_oid *node; ctx_list = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields"); oid_list = SYSCTL_CHILDREN(node); SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0, "driver version"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, "# of txqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, "# of rxqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, "permit #txq != #rxq"); SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix", CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0, "disable MSIX (default 0)"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget", CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the rx budget"); /* XXX change for per-queue sizes */ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A", "list of # of tx descriptors to use, 0 = use default #"); SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A", "list of # of rx descriptors to use, 0 = use default #"); } static void 
iflib_add_device_sysctl_post(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx_list; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j; char namebuf[NAME_BUFLEN]; char *qfmt; struct sysctl_oid *queue_node, *fl_node, *node; struct sysctl_oid_list *queue_list, *fl_list; ctx_list = device_get_sysctl_ctx(dev); node = ctx->ifc_sysctl_node; child = SYSCTL_CHILDREN(node); if (scctx->isc_ntxqsets > 100) qfmt = "txq%03d"; else if (scctx->isc_ntxqsets > 10) qfmt = "txq%02d"; else qfmt = "txq%d"; for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued", CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued", CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued"); #endif SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag", CTLFLAG_RD, &txq->ift_mbuf_defrag, "# of times m_defrag was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", CTLFLAG_RD, &txq->ift_pullups, "# of times m_pullup was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail, "# of times no descriptors were available"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed, "# of times dma map failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx", CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx", CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use", CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed", CTLFLAG_RD, &txq->ift_processed, "descriptors processed for clean"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned", CTLFLAG_RD, &txq->ift_cleaned, "total cleaned"); SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state", CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0, mp_ring_state_handler, "A", "soft ring state"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues, "# of enqueues to the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops", CTLFLAG_RD, &txq->ift_br->drops, "# of drops in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts", CTLFLAG_RD, &txq->ift_br->starts, "# of normal consumer starts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list,
queue_list, OID_AUTO, "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls, "# of consumer stalls in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts, "# of consumer restarts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications, "# of consumer abdications in the mp_ring for this queue"); } if (scctx->isc_nrxqsets > 100) qfmt = "rxq%03d"; else if (scctx->isc_nrxqsets > 10) qfmt = "rxq%02d"; else qfmt = "rxq%d"; for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); if (sctx->isc_flags & IFLIB_HAS_RXCQ) { SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx", CTLFLAG_RD, &rxq->ifr_cq_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1, "Consumer Index"); } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j); fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "freelist Name"); fl_list = SYSCTL_CHILDREN(fl_node); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx", CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx", CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits", CTLFLAG_RD, &fl->ifl_credits, 1, "credits available"); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued, "mbufs allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued, "mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued, "clusters allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued, "clusters freed"); #endif } } } #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m) { struct mbuf *n; if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; n = m; } else { MGETHDR(n, M_NOWAIT, MT_DATA); if (n == NULL) { m_freem(m); return (NULL); } bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; } return (n); } #endif Index: projects/runtime-coverage/sys/powerpc/conf/QORIQ64 =================================================================== --- projects/runtime-coverage/sys/powerpc/conf/QORIQ64 (revision 325209) +++ projects/runtime-coverage/sys/powerpc/conf/QORIQ64 (revision 325210) @@ -1,103 +1,105 @@ # # Custom kernel for Freescale QorIQ (P5xxx, Txxxx) based boards, like # AmigaOne X5000 # # $FreeBSD$ # cpu BOOKE cpu BOOKE_E500 ident MPC85XX machine powerpc powerpc64 +include "dpaa/config.dpaa" makeoptions DEBUG="-Wa,-me500 -g" makeoptions WERROR="-Werror -Wno-format -Wno-redundant-decls" makeoptions NO_MODULES=yes #options EARLY_PRINTF options FPU_EMU options BOOTVERBOSE=1 options _KPOSIX_PRIORITY_SCHEDULING options ALT_BREAK_TO_DEBUGGER options BREAK_TO_DEBUGGER options BOOTP options BOOTP_NFSROOT #options BOOTP_NFSV3 options CD9660 #options COMPAT_43 options COMPAT_FREEBSD32 
#Compatible with FreeBSD/powerpc binaries options DDB #options DEADLKRES options DEVICE_POLLING #options DIAGNOSTIC options FDT #makeoptions FDT_DTS_FILE=mpc8555cds.dts options FFS options GDB options GEOM_PART_GPT options INET options INET6 options TCP_HHOOK # hhook(9) framework for TCP options INVARIANTS options INVARIANT_SUPPORT options KDB options KTRACE options MD_ROOT options MPC85XX options MSDOSFS options NFS_ROOT options NFSCL options NFSLOCKD options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options PROCFS options PSEUDOFS options SCHED_ULE options CAPABILITIES options CAPABILITY_MODE options SMP options SYSVMSG options SYSVSEM options SYSVSHM options WITNESS options WITNESS_SKIPSPIN device ata device bpf device cfi device crypto device cryptodev device da device ds1553 device em device alc +device dpaa device ether device fxp device gpio device gpiopower device iic device iicbus #device isa device loop device md device miibus device mmc device mmcsd device pass device pci device random #device rl device scbus device scc device sdhci device sec device tun device uart options USB_DEBUG # enable debug msgs #device uhci device ehci device umass device usb device vlan Index: projects/runtime-coverage =================================================================== --- projects/runtime-coverage (revision 325209) +++ projects/runtime-coverage (revision 325210) Property changes on: projects/runtime-coverage ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r325200-325209